Mirror of https://github.com/zhaofengli/attic.git

Initial public commit
Commit 5aa1623dc2
135 changed files with 15956 additions and 0 deletions

.cargo/config (new file, 3 lines)
@@ -0,0 +1,3 @@
[build]
rustflags = ["--cfg", "tokio_unstable"]
rustdocflags = ["--cfg", "tokio_unstable"]
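
The `tokio_unstable` cfg gates Tokio's unstable APIs at compile time; setting it in `.cargo/config` applies it to every build of the workspace. A minimal sketch (not Attic code, and assuming a Tokio version where `task::Builder::spawn` returns `io::Result`) of one API this unlocks, named tasks, which tools such as tokio-console can display:

```rust
use tokio::task;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // `task::Builder` only compiles with `--cfg tokio_unstable`.
    let handle = task::Builder::new()
        .name("example-task")
        .spawn(async { 40 + 2 })?;
    assert_eq!(handle.await.unwrap(), 42);
    Ok(())
}
```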

.editorconfig (new file, 29 lines)
@@ -0,0 +1,29 @@
# EditorConfig configuration for Attic

# Top-most EditorConfig file
root = true

# Unix-style newlines with a newline ending every file, utf-8 charset
[*]
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true
charset = utf-8

# Rust
[*.rs]
indent_style = space
indent_size = 2

# Misc
[*.{yaml,yml,md,nix}]
indent_style = space
indent_size = 2

[attic/src/nix_store/tests/nar/**]
charset = unset
end_of_line = unset
insert_final_newline = unset
trim_trailing_whitespace = unset
indent_style = unset
indent_size = unset

.envrc (new file, 5 lines)
@@ -0,0 +1,5 @@
if ! has nix_direnv_version || ! nix_direnv_version 2.2.0; then
  source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/2.2.0/direnvrc" "sha256-5EwyKnkJNQeXrRkYbwwRBcXbibosCJqyIUuz9Xq+LRc="
fi

use_flake

.github/workflows/book.yml (vendored, new file, 46 lines)
@@ -0,0 +1,46 @@
name: Deploy Book

on:
  push:

permissions:
  contents: read
  pages: write
  id-token: write

jobs:
  deploy-unstable:
    name: Deploy

    runs-on: ubuntu-latest
    if: github.repository == 'zhaofengli/attic'

    steps:
      - uses: actions/checkout@v3.0.2

      - name: Obtain current username
        run: |
          echo ACTION_USER=$USER >> $GITHUB_ENV

      - uses: DeterminateSystems/nix-installer@main
        with:
          extra-conf: |
            trusted-users = root ${{ env.ACTION_USER }}
            substituters = https://staging.attic.rs/attic-ci https://cache.nixos.org
            trusted-public-keys = attic-ci:U5Sey4mUxwBXM3iFapmP0/ogODXywKLRNgRPQpEXxbo= cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=

      # == Manual
      - name: Build book
        run: nix build .#book -L

      - name: Copy book artifact
        run: |
          cp --recursive --dereference --no-preserve=mode,ownership result public

      - name: Upload book artifact
        uses: actions/upload-pages-artifact@v1.0.7
        with:
          path: public

      - name: Deploy book
        uses: actions/deploy-pages@v1.2.3

.github/workflows/build.yml (vendored, new file, 27 lines)
@@ -0,0 +1,27 @@
name: Build
on:
  pull_request:
  push:
jobs:
  tests:
    strategy:
      matrix:
        os:
          - ubuntu-latest
          - macos-11
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v2.3.5

      - name: Obtain current username
        run: |
          echo ACTION_USER=$USER >> $GITHUB_ENV

      - uses: DeterminateSystems/nix-installer@main
        with:
          extra-conf: |
            trusted-users = root ${{ env.ACTION_USER }}
            substituters = https://staging.attic.rs/attic-ci https://cache.nixos.org
            trusted-public-keys = attic-ci:U5Sey4mUxwBXM3iFapmP0/ogODXywKLRNgRPQpEXxbo= cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=

      - run: nix develop --command -- cargo test

.gitignore (vendored, new file, 6 lines)
@@ -0,0 +1,6 @@
.direnv

/target
result

fly.toml

Cargo.lock (generated, new file, 4008 lines)
File diff suppressed because it is too large

Cargo.toml (new file, 11 lines)
@@ -0,0 +1,11 @@
[workspace]

members = [
    "attic",
    "client",
    "server",
]

[profile.dev]
opt-level = 2
incremental = true

LICENSE (new file, 13 lines)
@@ -0,0 +1,13 @@
Copyright 2022 Zhaofeng Li and the Attic contributors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

README.md (new file, 35 lines)
@@ -0,0 +1,35 @@
# Attic

**Attic** is a self-hostable Nix Binary Cache server backed by an S3-compatible storage provider.
It supports global deduplication and garbage collection.

Attic is an early prototype.

```
⚙️ Pushing 5 paths to "demo" on "local" (566 already cached, 2001 in upstream)...
✅ gnvi1x7r8kl3clzx0d266wi82fgyzidv-steam-run-fhs (29.69 MiB/s)
✅ rw7bx7ak2p02ljm3z4hhpkjlr8rzg6xz-steam-fhs (30.56 MiB/s)
✅ y92f9y7qhkpcvrqhzvf6k40j6iaxddq8-0p36ammvgyr55q9w75845kw4fw1c65ln-source (19.96 MiB/s)
🕒 vscode-1.74.2 ███████████████████████████████████████ 345.66 MiB (41.32 MiB/s)
🕓 zoom-5.12.9.367 ███████████████████████████ 329.36 MiB (39.47 MiB/s)
```

## Try it out (15 minutes)

Let's [spin up Attic](https://docs.attic.rs/tutorial.html) in just 15 minutes.
And yes, it works on macOS too!

## Goals

- **Multi-Tenancy**: Create a private cache for yourself, and one for friends and co-workers. Tenants are mutually untrusting and cannot pollute the views of other caches.
- **Global Deduplication**: Individual caches (tenants) are simply restricted views of the content-addressed global cache. When paths are uploaded, a mapping is created to grant the local cache access to the global NAR.
- **Managed Signing**: Signing is done on-the-fly by the server when store paths are fetched. The user pushing store paths does not have access to the signing key.
- **Scalability**: Attic can be easily replicated. It's designed to be deployed to serverless platforms like fly.io but also works nicely in a single-machine setup.
- **Garbage Collection**: Unused store paths can be garbage-collected in an LRU manner.

## Licensing

Attic is available under the **Apache License, Version 2.0**.
See `LICENSE` for details.

By contributing to the project, you agree to license your work under the aforementioned license.
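
The global-deduplication model described above can be illustrated with a small sketch. This is a deliberately simplified stand-in, not Attic's actual data model: NAR bodies live once in a content-addressed pool keyed by hash, and each cache (tenant) holds only a set of mappings into that pool.

```rust
use std::collections::{HashMap, HashSet};

// Hypothetical, simplified model of global deduplication.
struct GlobalStore {
    nars: HashMap<String, Vec<u8>>,            // content hash -> NAR bytes, stored once
    caches: HashMap<String, HashSet<String>>,  // cache name -> hashes it may access
}

impl GlobalStore {
    fn upload(&mut self, cache: &str, nar_hash: String, nar: Vec<u8>) {
        // The NAR body is written at most once, no matter how many tenants push it;
        // each upload only adds a mapping granting the cache access.
        self.nars.entry(nar_hash.clone()).or_insert(nar);
        self.caches
            .entry(cache.to_string())
            .or_default()
            .insert(nar_hash);
    }
}

fn main() {
    let mut store = GlobalStore { nars: HashMap::new(), caches: HashMap::new() };
    store.upload("alice", "sha256:aaaa".into(), vec![1, 2, 3]);
    store.upload("bob", "sha256:aaaa".into(), vec![1, 2, 3]);
    assert_eq!(store.nars.len(), 1);   // deduplicated globally
    assert_eq!(store.caches.len(), 2); // visible to both tenants
}
```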

attic/Cargo.toml (new file, 49 lines)
@@ -0,0 +1,49 @@
[package]
name = "attic"
version = "0.1.0"
edition = "2021"
publish = false

[dependencies]
base64 = "0.20.0"
displaydoc = "0.2.3"
digest = "0.10.6"
ed25519-compact = "2.0.4"
futures = "0.3.25"
hex = "0.4.3"
lazy_static = "1.4.0"
log = "0.4.17"
nix-base32 = { git = "https://github.com/zhaofengli/nix-base32.git", rev = "b850c6e9273d1c39bd93abb704a53345f5be92eb" }
regex = "1.7.0"
serde = { version = "1.0.151", features = ["derive"] }
serde_yaml = "0.9.16"
sha2 = "0.10.6"
tempfile = "3"
wildmatch = "2.1.1"
xdg = "2.4.1"

# Native libnixstore bindings.
cxx = { version = "1.0", optional = true }

[dependencies.tokio]
version = "1.23.0"
features = [
    "full",
]

[dev-dependencies]
serde_json = "1.0.91"
tokio-test = "0.4.2"

[build-dependencies]
bindgen = { version = "0.63.0", optional = true }
cxx-build = { version = "1.0", optional = true }
pkg-config = "0.3.26"

[features]
default = [ "nix_store" ]

# Native libnixstore bindings.
#
# When disabled, the native Rust portions of nix_store can still be used.
nix_store = [ "cxx", "bindgen", "cxx-build" ]
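
The `nix_store` feature above makes the C++ dependencies (`cxx`, `bindgen`, `cxx-build`) optional while keeping the pure-Rust parts usable. A minimal sketch of that feature-gating pattern (illustrative names, not Attic's actual code):

```rust
// Compiled only when the `nix_store` feature is enabled.
#[cfg(feature = "nix_store")]
fn backend() -> &'static str {
    "libnixstore (native C++ bindings)"
}

// Fallback when the feature is disabled: the pure-Rust portions remain usable.
#[cfg(not(feature = "nix_store"))]
fn backend() -> &'static str {
    "pure Rust"
}

fn main() {
    println!("nix_store backend: {}", backend());
}
```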

attic/build.rs (new file, 98 lines)
@@ -0,0 +1,98 @@
//! Build script.
//!
//! We link against libnixstore to perform actions on the Nix Store.

#[cfg(feature = "nix_store")]
use bindgen::callbacks::{EnumVariantValue, ParseCallbacks};

fn main() {
    #[cfg(feature = "nix_store")]
    build_bridge();

    #[cfg(feature = "nix_store")]
    run_bindgen();
}

#[cfg(feature = "nix_store")]
#[derive(Debug)]
struct TransformNix;

#[cfg(feature = "nix_store")]
impl ParseCallbacks for TransformNix {
    fn enum_variant_name(
        &self,
        enum_name: Option<&str>,
        original_variant_name: &str,
        _variant_value: EnumVariantValue,
    ) -> Option<String> {
        match enum_name {
            Some("HashType") => {
                let t = match original_variant_name {
                    "htUnknown" => "Unknown",
                    "htMD5" => "Md5",
                    "htSHA1" => "Sha1",
                    "htSHA256" => "Sha256",
                    "htSHA512" => "Sha512",
                    x => panic!("Unknown hash type {} - Add it in build.rs", x),
                };
                Some(t.to_owned())
            }
            _ => None,
        }
    }

    fn include_file(&self, filename: &str) {
        println!("cargo:rerun-if-changed={}", filename);
    }
}

#[cfg(feature = "nix_store")]
fn build_bridge() {
    cxx_build::bridge("src/nix_store/bindings/mod.rs")
        .file("src/nix_store/bindings/nix.cpp")
        .flag("-std=c++17")
        .flag("-O2")
        .flag("-include")
        .flag("nix/config.h")
        .compile("nixbinding");

    println!("cargo:rerun-if-changed=src/nix_store/bindings");
}

#[cfg(feature = "nix_store")]
fn run_bindgen() {
    use std::env;
    use std::path::PathBuf;

    let out_path = PathBuf::from(env::var("OUT_DIR").unwrap());

    let headers = vec!["src/nix_store/bindings/bindgen.hpp"];

    let mut builder = bindgen::Builder::default()
        .clang_arg("-std=c++17")
        .clang_arg("-include")
        .clang_arg("nix/config.h")
        .opaque_type("std::.*")
        .allowlist_type("nix::Hash")
        .rustified_enum("nix::HashType")
        .disable_name_namespacing()
        .layout_tests(false)
        .parse_callbacks(Box::new(TransformNix));

    for header in headers {
        builder = builder.header(header);
        println!("cargo:rerun-if-changed={}", header);
    }

    let bindings = builder.generate().expect("Failed to generate Nix bindings");

    bindings
        .write_to_file(out_path.join("bindgen.rs"))
        .expect("Failed to write bindings");

    // the -l flags must be after -lnixbinding
    pkg_config::Config::new()
        .atleast_version("2.4")
        .probe("nix-store")
        .unwrap();
}

attic/src/api/mod.rs (new file, 1 line)
@@ -0,0 +1 @@
pub mod v1;

attic/src/api/v1/cache_config.rs (new file, 136 lines)
@@ -0,0 +1,136 @@
//! Cache configuration endpoint.

use serde::{Deserialize, Serialize};

use crate::signing::NixKeypair;

#[derive(Debug, Serialize, Deserialize)]
pub struct CreateCacheRequest {
    /// The keypair of the cache.
    pub keypair: KeypairConfig,

    /// Whether the cache is public or not.
    ///
    /// Anonymous clients are implicitly granted the "pull"
    /// permission to public caches.
    pub is_public: bool,

    /// The Nix store path this binary cache uses.
    ///
    /// This is usually `/nix/store`.
    pub store_dir: String,

    /// The priority of the binary cache.
    ///
    /// A lower number denotes a higher priority.
    /// <https://cache.nixos.org> has a priority of 40.
    pub priority: i32,

    /// A list of signing key names of upstream caches.
    ///
    /// The list serves as a hint to clients to avoid uploading
    /// store paths signed with such keys.
    pub upstream_cache_key_names: Vec<String>,
}

/// Configuration of a cache.
///
/// Specifying `None` means using the default value or
/// keeping the current value.
#[derive(Debug, Serialize, Deserialize)]
pub struct CacheConfig {
    /// The keypair of the cache.
    ///
    /// The keypair is never returned by the server, but can
    /// be configured by the client.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub keypair: Option<KeypairConfig>,

    /// The Nix binary cache endpoint of the cache.
    ///
    /// This is the endpoint that should be added to `nix.conf`.
    /// This is read-only and may not be available.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub substituter_endpoint: Option<String>,

    /// The Attic API endpoint.
    ///
    /// This is read-only and may not be available.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub api_endpoint: Option<String>,

    /// The public key of the cache, in the canonical format used by Nix.
    ///
    /// This is read-only and may not be available.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub public_key: Option<String>,

    /// Whether the cache is public or not.
    ///
    /// Anonymous clients are implicitly granted the "pull"
    /// permission to public caches.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub is_public: Option<bool>,

    /// The Nix store path this binary cache uses.
    ///
    /// This is usually `/nix/store`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub store_dir: Option<String>,

    /// The priority of the binary cache.
    ///
    /// A lower number denotes a higher priority.
    /// <https://cache.nixos.org> has a priority of 40.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub priority: Option<i32>,

    /// A list of signing key names of upstream caches.
    ///
    /// The list serves as a hint to clients to avoid uploading
    /// store paths signed with such keys.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub upstream_cache_key_names: Option<Vec<String>>,

    /// The retention period of the cache.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub retention_period: Option<RetentionPeriodConfig>,
}

/// Configuration of a keypair.
#[derive(Debug, Serialize, Deserialize)]
pub enum KeypairConfig {
    /// Use a randomly-generated keypair.
    Generate,

    /// Use a client-specified keypair.
    Keypair(NixKeypair),
}

/// Configuration of retention period.
#[derive(Debug, Serialize, Deserialize)]
pub enum RetentionPeriodConfig {
    /// Use the global default.
    Global,

    /// Specify a retention period in seconds.
    ///
    /// If 0, then time-based garbage collection is disabled.
    Period(u32),
}

impl CacheConfig {
    pub fn blank() -> Self {
        Self {
            keypair: None,
            substituter_endpoint: None,
            api_endpoint: None,
            public_key: None,
            is_public: None,
            store_dir: None,
            priority: None,
            upstream_cache_key_names: None,
            retention_period: None,
        }
    }
}
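
For a sense of the wire format, here is a sketch of what a create-cache request body might look like, assuming serde's default externally-tagged representation for `KeypairConfig` (so the unit variant serializes as the string `"Generate"`). The field values are hypothetical placeholders:

```rust
use serde_json::json;

fn main() {
    // Mirrors the `CreateCacheRequest` fields above; values are illustrative.
    let request = json!({
        "keypair": "Generate",
        "is_public": false,
        "store_dir": "/nix/store",
        "priority": 41,
        "upstream_cache_key_names": ["cache.nixos.org-1"],
    });
    println!("{}", serde_json::to_string_pretty(&request).unwrap());
}
```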

attic/src/api/v1/get_missing_paths.rs (new file, 25 lines)
@@ -0,0 +1,25 @@
//! get-missing-paths v1
//!
//! `POST /_api/v1/get-missing-paths`
//!
//! Requires "push" permission.

use serde::{Deserialize, Serialize};

use crate::cache::CacheName;
use crate::nix_store::StorePathHash;

#[derive(Debug, Serialize, Deserialize)]
pub struct GetMissingPathsRequest {
    /// The name of the cache.
    pub cache: CacheName,

    /// The list of store paths.
    pub store_path_hashes: Vec<StorePathHash>,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct GetMissingPathsResponse {
    /// A list of paths that are not in the cache.
    pub missing_paths: Vec<StorePathHash>,
}
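
A push client would typically call this endpoint first and only upload the paths reported missing. A self-contained sketch of that client-side filtering step, with plain strings standing in for `StorePathHash` values (the hash strings are placeholders):

```rust
// Keep only the candidates the server reported as missing.
fn to_push(candidates: &[String], missing: &[String]) -> Vec<String> {
    candidates
        .iter()
        .filter(|h| missing.contains(h))
        .cloned()
        .collect()
}

fn main() {
    let candidates = vec!["abc0000".to_string(), "def1111".to_string()];
    let missing = vec!["def1111".to_string()]; // response from get-missing-paths
    assert_eq!(to_push(&candidates, &missing), vec!["def1111".to_string()]);
}
```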

attic/src/api/v1/mod.rs (new file, 3 lines)
@@ -0,0 +1,3 @@
pub mod cache_config;
pub mod get_missing_paths;
pub mod upload_path;

attic/src/api/v1/upload_path.rs (new file, 52 lines)
@@ -0,0 +1,52 @@
use serde::{Deserialize, Serialize};

use crate::cache::CacheName;
use crate::hash::Hash;
use crate::nix_store::StorePathHash;

/// NAR information associated with an upload.
///
/// This is JSON-serialized as the value of the `X-Attic-Nar-Info` header.
/// The (client-compressed) NAR is the PUT body.
///
/// Regardless of client compression, the server will always decompress
/// the NAR to validate the NAR hash before applying the server-configured
/// compression again.
#[derive(Debug, Serialize, Deserialize)]
pub struct UploadPathNarInfo {
    /// The name of the binary cache to upload to.
    pub cache: CacheName,

    /// The hash portion of the store path.
    pub store_path_hash: StorePathHash,

    /// The full store path being cached, including the store directory.
    pub store_path: String,

    /// Other store paths this object directly references.
    pub references: Vec<String>,

    /// The system this derivation is built for.
    pub system: Option<String>,

    /// The derivation that produced this object.
    pub deriver: Option<String>,

    /// The signatures of this object.
    pub sigs: Vec<String>,

    /// The CA field of this object.
    pub ca: Option<String>,

    /// The hash of the NAR.
    ///
    /// It must begin with `sha256:` with the SHA-256 hash in the
    /// hexadecimal format (64 hex characters).
    ///
    /// This is informational and the server always validates the supplied
    /// hash.
    pub nar_hash: Hash,

    /// The size of the NAR.
    pub nar_size: usize,
}
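
To make the header protocol concrete, here is a sketch of building the `X-Attic-Nar-Info` value: the metadata is serialized to a single JSON string that travels in the header while the compressed NAR goes in the PUT body. The values below are hypothetical placeholders, not output from Attic's client:

```rust
use serde_json::json;

fn main() {
    // Stand-in for a serialized `UploadPathNarInfo`; all values illustrative.
    let nar_info = json!({
        "cache": "demo",
        "store_path_hash": "ia70ss13m22znbl8khrf2hq72qmh5drr",
        "store_path": "/nix/store/ia70ss13m22znbl8khrf2hq72qmh5drr-ruby-2.7.5",
        "references": [],
        "system": "x86_64-linux",
        "deriver": null,
        "sigs": [],
        "ca": null,
        "nar_hash": "sha256:df3404eaf1481506db9ca155e0a871d5b4d22e62a96961e8bf4ad1a8ca525330",
        "nar_size": 1234,
    });

    // The whole JSON document becomes one header value; the NAR is the body.
    let header_value = serde_json::to_string(&nar_info).unwrap();
    println!("X-Attic-Nar-Info: {}", header_value);
}
```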

attic/src/cache.rs (new file, 266 lines)
@@ -0,0 +1,266 @@
//! Binary caches.
//!
//! ## Cache Naming
//!
//! Cache names can be up to 50 characters long and can only consist of
//! ASCII alphanumeric characters (A-Za-z0-9), dashes ('-'), underscores
//! ('_'), and plus signs ('+'). They must also start with an alphanumeric
//! character (e.g., "_cache" is _not_ a valid cache name).
//!
//! The plus sign is intended to be used as the delimiter between a
//! namespace and a user-given name (e.g., `zhaofengli+shared`).

use std::hash::{Hash, Hasher};
use std::str::FromStr;

use lazy_static::lazy_static;
use regex::Regex;
use serde::{de, Deserialize, Serialize};
use wildmatch::WildMatch;

use crate::error::{AtticError, AtticResult};

/// The maximum allowable length of a cache name.
pub const MAX_NAME_LENGTH: usize = 50;

lazy_static! {
    static ref CACHE_NAME_REGEX: Regex = Regex::new(r"^[A-Za-z0-9][A-Za-z0-9-_+]{0,49}$").unwrap();
    static ref CACHE_NAME_PATTERN_REGEX: Regex =
        Regex::new(r"^[A-Za-z0-9*][A-Za-z0-9-_+*]{0,49}$").unwrap();
}

/// The name of a binary cache.
///
/// Names can only consist of ASCII alphanumeric characters (A-Za-z0-9),
/// dashes ('-'), underscores ('_'), and plus signs ('+').
#[derive(Serialize, Deserialize, Clone, Debug, Hash, Eq, PartialEq)]
#[serde(transparent)]
pub struct CacheName(#[serde(deserialize_with = "CacheName::deserialize")] String);

/// A pattern of cache names.
///
/// The keys in the custom JWT claim are patterns that can
/// be matched against cache names. Thus patterns can only be created
/// by trusted entities.
///
/// In addition to what's allowed in cache names, patterns can include
/// wildcards ('*') to enable a limited form of namespace-based access
/// control.
///
/// This is particularly useful in conjunction with the `cache_create`
/// permission which allows the user to autonomously create caches under
/// their own namespace (e.g., `zhaofengli+*`).
#[derive(Serialize, Clone, Debug)]
#[serde(transparent)]
pub struct CacheNamePattern {
    pattern: String,

    /// The pattern matcher.
    ///
    /// If None, then `pattern` itself will be used to match exactly.
    /// This is a special case for converting a CacheName to a
    /// CacheNamePattern.
    ///
    /// It's possible to combine the two structs into one, but the goal
    /// is to have strong delineation between them enforced by the type
    /// system (you can't attempt to call `matches` at all on a regular
    /// CacheName).
    #[serde(skip)]
    matcher: Option<WildMatch>,
}

impl CacheName {
    /// Creates a cache name from a String.
    pub fn new(name: String) -> AtticResult<Self> {
        validate_cache_name(&name, false)?;
        Ok(Self(name))
    }

    /// Returns the string.
    pub fn as_str(&self) -> &str {
        &self.0
    }

    pub fn to_string(&self) -> String {
        self.0.clone()
    }

    /// Returns the corresponding pattern that only matches this cache.
    pub fn to_pattern(&self) -> CacheNamePattern {
        CacheNamePattern {
            pattern: self.0.clone(),
            matcher: None,
        }
    }

    /// Deserializes a potentially-invalid cache name.
    fn deserialize<'de, D>(deserializer: D) -> Result<String, D::Error>
    where
        D: de::Deserializer<'de>,
    {
        use de::Error;
        String::deserialize(deserializer).and_then(|s| {
            validate_cache_name(&s, false).map_err(|e| Error::custom(e.to_string()))?;
            Ok(s)
        })
    }
}

impl FromStr for CacheName {
    type Err = AtticError;

    fn from_str(name: &str) -> AtticResult<Self> {
        Self::new(name.to_owned())
    }
}

impl CacheNamePattern {
    /// Creates a cache name pattern from a String.
    pub fn new(pattern: String) -> AtticResult<Self> {
        validate_cache_name(&pattern, true)?;
        let matcher = WildMatch::new(&pattern);

        Ok(Self {
            pattern,
            matcher: Some(matcher),
        })
    }

    /// Tests if the pattern matches a name.
    pub fn matches(&self, name: &CacheName) -> bool {
        match &self.matcher {
            Some(matcher) => matcher.matches(name.as_str()),
            None => self.pattern == name.as_str(),
        }
    }
}

impl FromStr for CacheNamePattern {
    type Err = AtticError;

    fn from_str(pattern: &str) -> AtticResult<Self> {
        Self::new(pattern.to_owned())
    }
}

impl<'de> Deserialize<'de> for CacheNamePattern {
    /// Deserializes a potentially-invalid cache name pattern.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: de::Deserializer<'de>,
    {
        use de::Error;
        let pattern = String::deserialize(deserializer).and_then(|s| {
            validate_cache_name(&s, true).map_err(|e| Error::custom(e.to_string()))?;
            Ok(s)
        })?;

        let matcher = WildMatch::new(&pattern);

        Ok(Self {
            pattern,
            matcher: Some(matcher),
        })
    }
}

impl Hash for CacheNamePattern {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.pattern.hash(state);
    }
}

impl PartialEq for CacheNamePattern {
    fn eq(&self, other: &Self) -> bool {
        self.pattern == other.pattern
    }
}

impl Eq for CacheNamePattern {}

fn validate_cache_name(name: &str, allow_wildcards: bool) -> AtticResult<()> {
    let valid = if allow_wildcards {
        CACHE_NAME_PATTERN_REGEX.is_match(name)
    } else {
        CACHE_NAME_REGEX.is_match(name)
    };

    if valid {
        Ok(())
    } else {
        Err(AtticError::InvalidCacheName {
            name: name.to_owned(),
        })
    }
}

#[cfg(test)]
pub mod tests {
    use super::*;

    macro_rules! cache {
        ($n:expr) => {
            CacheName::new($n.to_string()).unwrap()
        };
    }

    pub(crate) use cache;

    #[test]
    fn test_cache_name() {
        let names = vec![
            "valid-name",
            "Another_Valid_Name",
            "plan9",
            "username+cache",
        ];

        for name in names {
            assert_eq!(name, CacheName::new(name.to_string()).unwrap().as_str());

            assert_eq!(
                name,
                serde_json::from_str::<CacheName>(&format!("\"{}\"", name))
                    .unwrap()
                    .as_str(),
            );
        }

        let bad_names = vec![
            "",
            "not a valid name",
            "team-*",
            "这布盒里.webp",
            "-ers",
            "and-you-can-have-it-all-my-empire-of-dirt-i-will-let-you-down-i-will-make-you-hurt",
        ];

        for name in bad_names {
            CacheName::new(name.to_string()).unwrap_err();
            serde_json::from_str::<CacheName>(&format!("\"{}\"", name)).unwrap_err();
        }
    }

    #[test]
    fn test_cache_name_pattern() {
        let pattern = CacheNamePattern::new("team-*".to_string()).unwrap();
        assert!(pattern.matches(&cache! { "team-" }));
        assert!(pattern.matches(&cache! { "team-abc" }));
        assert!(!pattern.matches(&cache! { "abc-team" }));

        let pattern = CacheNamePattern::new("no-wildcard".to_string()).unwrap();
        assert!(pattern.matches(&cache! { "no-wildcard" }));
        assert!(!pattern.matches(&cache! { "no-wildcard-xxx" }));
        assert!(!pattern.matches(&cache! { "xxx-no-wildcard" }));

        let pattern = CacheNamePattern::new("*".to_string()).unwrap();
        assert!(pattern.matches(&cache! { "literally-anything" }));

        CacheNamePattern::new("*-but-normal-restrictions-still-apply!!!".to_string()).unwrap_err();

        // eq
        let pattern1 = CacheNamePattern::new("same-pattern".to_string()).unwrap();
        let pattern2 = CacheNamePattern::new("same-pattern".to_string()).unwrap();
        assert_eq!(pattern1, pattern2);
        assert_ne!(pattern, pattern1);
    }
}
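
The wildcard patterns exist to support the namespace-based access control described in the doc comments. A sketch of the intended authorization check, assuming this crate as a dependency; the claim layout (a list of granted patterns) is an assumption for illustration, not Attic's actual JWT handling:

```rust
use attic::cache::{CacheName, CacheNamePattern};

// Patterns granted by a trusted token are matched against the target cache.
fn is_allowed(granted: &[CacheNamePattern], target: &CacheName) -> bool {
    granted.iter().any(|pattern| pattern.matches(target))
}

fn main() {
    // e.g. a token granting everything under the "alice" namespace.
    let granted = vec![CacheNamePattern::new("alice+*".to_string()).unwrap()];
    let target: CacheName = "alice+project".parse().unwrap();
    assert!(is_allowed(&granted, &target));
}
```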

attic/src/error.rs (new file, 84 lines)
@@ -0,0 +1,84 @@
//! Error handling.

use std::error::Error as StdError;
use std::io;
use std::path::PathBuf;

use displaydoc::Display;

pub type AtticResult<T> = Result<T, AtticError>;

/// An error.
#[derive(Debug, Display)]
pub enum AtticError {
    /// Invalid store path {path:?}: {reason}
    InvalidStorePath { path: PathBuf, reason: &'static str },

    /// Invalid store path base name {base_name:?}: {reason}
    InvalidStorePathName {
        base_name: PathBuf,
        reason: &'static str,
    },

    /// Invalid store path hash "{hash}": {reason}
    InvalidStorePathHash { hash: String, reason: &'static str },

    /// Invalid cache name "{name}"
    InvalidCacheName { name: String },

    /// Signing error: {0}
    SigningError(super::signing::Error),

    /// Hashing error: {0}
    HashError(super::hash::Error),

    /// I/O error: {error}.
    IoError { error: io::Error },

    /// Unknown C++ exception: {exception}.
    CxxError { exception: String },
}

impl AtticError {
    pub fn name(&self) -> &'static str {
        match self {
            Self::InvalidStorePath { .. } => "InvalidStorePath",
            Self::InvalidStorePathName { .. } => "InvalidStorePathName",
            Self::InvalidStorePathHash { .. } => "InvalidStorePathHash",
            Self::InvalidCacheName { .. } => "InvalidCacheName",
            Self::SigningError(_) => "SigningError",
            Self::HashError(_) => "HashError",
            Self::IoError { .. } => "IoError",
            Self::CxxError { .. } => "CxxError",
        }
    }
}

impl StdError for AtticError {}

#[cfg(feature = "nix_store")]
impl From<cxx::Exception> for AtticError {
    fn from(exception: cxx::Exception) -> Self {
        Self::CxxError {
            exception: exception.what().to_string(),
        }
    }
}

impl From<io::Error> for AtticError {
    fn from(error: io::Error) -> Self {
        Self::IoError { error }
    }
}

impl From<super::signing::Error> for AtticError {
    fn from(error: super::signing::Error) -> Self {
        Self::SigningError(error)
    }
}

impl From<super::hash::Error> for AtticError {
    fn from(error: super::hash::Error) -> Self {
        Self::HashError(error)
    }
}
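
The `From` impls above are what let callers bubble distinct error domains into `AtticError` with `?`. A minimal sketch, assuming this crate as a dependency (the file path is a hypothetical placeholder):

```rust
use attic::AtticResult;

// The `?` converts the `std::io::Error` into `AtticError::IoError`
// automatically via the `From<io::Error>` impl above.
fn read_blob() -> AtticResult<Vec<u8>> {
    let bytes = std::fs::read("/tmp/example-blob")?;
    Ok(bytes)
}

fn main() {
    if let Err(e) = read_blob() {
        // `name()` gives the variant name; `Display` comes from displaydoc.
        eprintln!("{} ({})", e, e.name());
    }
}
```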

attic/src/hash/mod.rs (new file, 153 lines)
@@ -0,0 +1,153 @@
//! Hashing utilities.

#[cfg(test)]
mod tests;

use displaydoc::Display;
use serde::{de, ser, Deserialize, Serialize};
use sha2::{Digest, Sha256};

use crate::error::AtticResult;

#[cfg(feature = "nix_store")]
use crate::nix_store::{FfiHash, FfiHashType};

/// A hash.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Hash {
    /// A SHA-256 hash.
    Sha256([u8; 32]),
}

/// A hashing error.
#[derive(Debug, Display)]
pub enum Error {
    /// The string lacks a colon separator.
    NoColonSeparator,

    /// Hash algorithm {0} is not supported.
    UnsupportedHashAlgorithm(String),

    /// Invalid base16 hash: {0}
    InvalidBase16Hash(hex::FromHexError),

    /// Invalid base32 hash.
    InvalidBase32Hash,

    /// Invalid length for {typ} string: Must be either {base16_len} (hexadecimal) or {base32_len} (base32), got {actual}.
    InvalidHashStringLength {
        typ: &'static str,
        base16_len: usize,
        base32_len: usize,
        actual: usize,
    },
}

impl Hash {
    /// Convenience function to generate a SHA-256 hash from a slice.
    pub fn sha256_from_bytes(bytes: &[u8]) -> Self {
        let mut hasher = Sha256::new();
        hasher.update(bytes);
        Self::Sha256(hasher.finalize().into())
    }

    /// Parses a typed representation of a hash.
    pub fn from_typed(s: &str) -> AtticResult<Self> {
        let colon = s.find(':').ok_or(Error::NoColonSeparator)?;

        let (typ, rest) = s.split_at(colon);
        let hash = &rest[1..];

        match typ {
            "sha256" => {
                let v = decode_hash(hash, "SHA-256", 32)?;
                Ok(Self::Sha256(v.try_into().unwrap()))
            }
            _ => Err(Error::UnsupportedHashAlgorithm(typ.to_owned()).into()),
        }
    }

    /// Returns the hash in Nix-specific Base32 format, with the hash type prepended.
    pub fn to_typed_base32(&self) -> String {
        format!("{}:{}", self.hash_type(), self.to_base32())
    }

    /// Returns the hash in hexadecimal format, with the hash type prepended.
    ///
    /// This is the canonical representation of hashes in the Attic database.
    pub fn to_typed_base16(&self) -> String {
        format!("{}:{}", self.hash_type(), hex::encode(self.data()))
    }

    fn data(&self) -> &[u8] {
        match self {
            Self::Sha256(d) => d,
        }
    }

    fn hash_type(&self) -> &'static str {
        match self {
            Self::Sha256(_) => "sha256",
        }
    }

    /// Returns the hash in Nix-specific Base32 format.
    fn to_base32(&self) -> String {
        nix_base32::to_nix_base32(self.data())
    }

    #[cfg(feature = "nix_store")]
    pub(super) fn from_ffi_hash(hash: FfiHash) -> AtticResult<Self> {
        match hash.type_ {
            FfiHashType::Sha256 => Ok(Self::Sha256(hash.hash[..32].try_into().unwrap())),
            typ => Err(Error::UnsupportedHashAlgorithm(typ.as_str().to_owned()).into()),
        }
    }
}

impl<'de> Deserialize<'de> for Hash {
    /// Deserializes a typed hash string.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: de::Deserializer<'de>,
    {
        use de::Error;

        String::deserialize(deserializer)
            .and_then(|s| Self::from_typed(&s).map_err(|e| Error::custom(e.to_string())))
    }
}

impl Serialize for Hash {
    /// Serializes a hash into a hexadecimal hash string.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: ser::Serializer,
    {
        serializer.serialize_str(&self.to_typed_base16())
    }
}

/// Decodes a base16 or base32 encoded hash containing a specified number of bytes.
fn decode_hash<'s>(s: &'s str, typ: &'static str, expected_bytes: usize) -> AtticResult<Vec<u8>> {
    let base16_len = expected_bytes * 2;
    let base32_len = (expected_bytes * 8 - 1) / 5 + 1;

    let v = if s.len() == base16_len {
        hex::decode(s).map_err(Error::InvalidBase16Hash)?
    } else if s.len() == base32_len {
        nix_base32::from_nix_base32(s).ok_or(Error::InvalidBase32Hash)?
    } else {
        return Err(Error::InvalidHashStringLength {
            typ,
            base16_len,
            base32_len,
            actual: s.len(),
        }
        .into());
    };

    assert!(v.len() == expected_bytes);

    Ok(v)
}
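
A quick worked check of the length arithmetic in `decode_hash` for SHA-256 (32 bytes): base16 uses 2 characters per byte, while Nix base32 packs 5 bits per character, so the encoding is disambiguated purely by string length, 64 versus ceil(256 / 5) = 52:

```rust
fn main() {
    let expected_bytes: usize = 32;
    let base16_len = expected_bytes * 2;               // 2 hex chars per byte
    let base32_len = (expected_bytes * 8 - 1) / 5 + 1; // ceil(bits / 5)
    assert_eq!((base16_len, base32_len), (64, 52));
}
```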

attic/src/hash/tests/.gitattributes (vendored, new file, 1 line)
@@ -0,0 +1 @@
blob -text

attic/src/hash/tests/blob (new file, 15 lines)
@@ -0,0 +1,15 @@
⊂_ヽ
\\ _
\( •_•) F
< ⌒ヽ A
/ へ\ B
/ / \\ U
レ ノ ヽ_つ L
/ / O
/ /| U
( (ヽ S
| |、\
| 丿 \ ⌒)
| | ) /
`ノ ) Lノ
(_/

attic/src/hash/tests/mod.rs (new file, 62 lines)
@@ -0,0 +1,62 @@
use super::*;

use crate::error::AtticError;
use crate::nix_store::tests::test_nar;

const BLOB: &[u8] = include_bytes!("blob");

#[test]
fn test_basic() {
    let hash = Hash::sha256_from_bytes(BLOB);

    let expected_base16 = "sha256:df3404eaf1481506db9ca155e0a871d5b4d22e62a96961e8bf4ad1a8ca525330";
    assert_eq!(expected_base16, hash.to_typed_base16());

    let expected_base32 = "sha256:0c2kab5ailaapzl62sd9c8pd5d6mf6lf0md1kkdhc5a8y7m08d6z";
    assert_eq!(expected_base32, hash.to_typed_base32());
}

#[test]
fn test_nar_hash() {
    let nar = test_nar::NO_DEPS;
    let hash = Hash::sha256_from_bytes(nar.nar());

    let expected_base32 = "sha256:0hjszid30ak3rkzvc3m94c3risg8wz2hayy100c1fg92bjvvvsms";
    assert_eq!(expected_base32, hash.to_typed_base32());
}

#[test]
fn test_from_typed() {
    let base16 = "sha256:baeabdb75c223d171800c17b05c5e7e8e9980723a90eb6ffcc632a305afc5a42";
    let base32 = "sha256:0hjszid30ak3rkzvc3m94c3risg8wz2hayy100c1fg92bjvvvsms";

    assert_eq!(
        Hash::from_typed(base16).unwrap(),
        Hash::from_typed(base32).unwrap()
    );

    assert!(matches!(
        Hash::from_typed("sha256"),
        Err(AtticError::HashError(Error::NoColonSeparator))
    ));

    assert!(matches!(
        Hash::from_typed("sha256:"),
        Err(AtticError::HashError(Error::InvalidHashStringLength { .. }))
    ));

    assert!(matches!(
        Hash::from_typed("sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"),
        Err(AtticError::HashError(Error::InvalidBase32Hash))
    ));

    assert!(matches!(
        Hash::from_typed("sha256:gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg"),
        Err(AtticError::HashError(Error::InvalidBase16Hash(_)))
    ));

    assert!(matches!(
        Hash::from_typed("md5:invalid"),
        Err(AtticError::HashError(Error::UnsupportedHashAlgorithm(alg))) if alg == "md5"
    ));
}

attic/src/lib.rs (new file, 29 lines)
@@ -0,0 +1,29 @@
//! The Attic Library.

#![deny(
    asm_sub_register,
    deprecated,
    missing_abi,
    unsafe_code,
    unused_macros,
    unused_must_use,
    unused_unsafe
)]
#![deny(clippy::from_over_into, clippy::needless_question_mark)]
#![cfg_attr(
    not(debug_assertions),
    deny(unused_imports, unused_mut, unused_variables,)
)]

pub mod api;
pub mod cache;
pub mod error;
pub mod hash;
pub mod mime;
pub mod nix_store;
pub mod signing;
pub mod stream;
pub mod testing;
pub mod util;

pub use error::{AtticError, AtticResult};

attic/src/mime.rs (new file, 10 lines)
@@ -0,0 +1,10 @@
//! MIME types.

/// /nix-cache-info
pub const NIX_CACHE_INFO: &str = "text/x-nix-cache-info";

/// .narinfo
pub const NARINFO: &str = "text/x-nix-narinfo";

/// .nar
pub const NAR: &str = "application/x-nix-nar";

attic/src/nix_store/README.md (new file, 14 lines)
@@ -0,0 +1,14 @@
# Nix Store Bindings

This directory contains a set of high-level Rust bindings to `libnixstore`, compatible with `async`/`await` semantics.
We currently target Nix 2.4+.

## Why?

With this wrapper, you can do things like:

```rust
let store = NixStore::connect()?;
let store_path = store.parse_store_path("/nix/store/ia70ss13m22znbl8khrf2hq72qmh5drr-ruby-2.7.5")?;
let nar_stream = store.nar_from_path(store_path); // AsyncWrite
```

attic/src/nix_store/bindings/bindgen.hpp (new file, 2 lines)
@@ -0,0 +1,2 @@
#include <nix/hash.hh>
#include <nix/store-api.hh>

attic/src/nix_store/bindings/bindgen.rs (new file, 37 lines)
@@ -0,0 +1,37 @@
//! Generated by `rust-bindgen`.
//!
//! We use `rust-bindgen` to generate bindings for a limited set of simpler
//! structures.

#![allow(
    dead_code,
    deref_nullptr,
    improper_ctypes,
    non_camel_case_types,
    non_snake_case,
    non_upper_case_globals
)]

include!(concat!(env!("OUT_DIR"), "/bindgen.rs"));

use crate::error::AtticResult;
use crate::hash::Hash as RustHash;

impl Hash {
    /// Converts this into the native Rust version of this hash.
    pub fn into_rust(self) -> AtticResult<RustHash> {
        RustHash::from_ffi_hash(self)
    }
}

impl HashType {
    /// Returns the identifier of the hashing algorithm.
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::Md5 => "md5",
            Self::Sha1 => "sha1",
            Self::Sha256 => "sha256",
            Self::Sha512 => "sha512",
        }
    }
}

attic/src/nix_store/bindings/mod.rs (new file, 272 lines)
@@ -0,0 +1,272 @@
//! `libnixstore` Bindings

mod bindgen;

use std::cell::UnsafeCell;
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};

use cxx::{type_id, ExternType};
use futures::stream::{Stream, StreamExt};
use tokio::io::{AsyncWrite, AsyncWriteExt};

use crate::{AtticError, AtticResult};

pub use bindgen::{Hash as FfiHash, HashType as FfiHashType};

unsafe impl ExternType for FfiHash {
    type Id = type_id!("nix::Hash");
    type Kind = cxx::kind::Trivial;
}

unsafe impl ExternType for FfiHashType {
    type Id = type_id!("nix::HashType");
    type Kind = cxx::kind::Trivial;
}

// The C++ implementation takes care of concurrency
#[repr(transparent)]
pub struct FfiNixStore(UnsafeCell<cxx::UniquePtr<ffi::CNixStore>>);

unsafe impl Send for FfiNixStore {}
unsafe impl Sync for FfiNixStore {}

impl FfiNixStore {
    pub fn store<'a>(&'a self) -> Pin<&'a mut ffi::CNixStore> {
        unsafe {
            let ptr = self.0.get().as_mut().unwrap();
            ptr.pin_mut()
        }
    }
}

/// Obtain a handle to the Nix store.
pub unsafe fn open_nix_store() -> AtticResult<FfiNixStore> {
    match ffi::open_nix_store() {
        Ok(ptr) => {
            let cell = UnsafeCell::new(ptr);
            Ok(FfiNixStore(cell))
        }
        Err(e) => Err(e.into()),
    }
}

// TODO: Benchmark different implementations
// (tokio, crossbeam, flume)
mod mpsc {
    // Tokio
    pub use tokio::sync::mpsc::{
        error::SendError, unbounded_channel, UnboundedReceiver, UnboundedSender,
    };
}

/// Async write request.
#[derive(Debug)]
enum AsyncWriteMessage {
    Data(Vec<u8>),
    Error(String),
    Eof,
}

/// Async write request sender.
#[derive(Clone)]
pub struct AsyncWriteSender {
    sender: mpsc::UnboundedSender<AsyncWriteMessage>,
}

impl AsyncWriteSender {
    fn send(&mut self, data: &[u8]) -> Result<(), mpsc::SendError<AsyncWriteMessage>> {
        let message = AsyncWriteMessage::Data(Vec::from(data));
        self.sender.send(message)
    }

    fn eof(&mut self) -> Result<(), mpsc::SendError<AsyncWriteMessage>> {
        let message = AsyncWriteMessage::Eof;
        self.sender.send(message)
    }

    pub(crate) fn rust_error(
        &mut self,
        error: impl std::error::Error,
    ) -> Result<(), impl std::error::Error> {
        let message = AsyncWriteMessage::Error(error.to_string());
        self.sender.send(message)
    }
}

/// A wrapper of the `AsyncWrite` trait for the synchronous Nix C++ land.
pub struct AsyncWriteAdapter {
    receiver: mpsc::UnboundedReceiver<AsyncWriteMessage>,
    eof: bool,
}

impl AsyncWriteAdapter {
    pub fn new() -> (Self, Box<AsyncWriteSender>) {
        let (sender, receiver) = mpsc::unbounded_channel();

        let r = Self {
            receiver,
            eof: false,
        };
        let sender = Box::new(AsyncWriteSender { sender });

        (r, sender)
    }

    /// Write everything the sender sends to us.
    pub async fn write_all(mut self, mut writer: Box<dyn AsyncWrite + Unpin>) -> AtticResult<()> {
        let writer = writer.as_mut();

        while let Some(data) = self.next().await {
            match data {
                Ok(v) => {
                    writer.write_all(&v).await?;
                }
                Err(e) => {
                    return Err(e);
                }
            }
        }

        if !self.eof {
            Err(io::Error::from(io::ErrorKind::BrokenPipe).into())
        } else {
            Ok(())
        }
    }
}

impl Stream for AsyncWriteAdapter {
    type Item = AtticResult<Vec<u8>>;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        match self.receiver.poll_recv(cx) {
            Poll::Pending => Poll::Pending,
            Poll::Ready(Some(message)) => {
                use AsyncWriteMessage::*;
                match message {
                    Data(v) => Poll::Ready(Some(Ok(v))),
                    Error(exception) => {
                        let error = AtticError::CxxError { exception };
                        Poll::Ready(Some(Err(error)))
                    }
                    Eof => {
                        self.eof = true;
                        Poll::Ready(None)
                    }
                }
            }
            Poll::Ready(None) => {
                if !self.eof {
                    Poll::Ready(Some(Err(io::Error::from(io::ErrorKind::BrokenPipe).into())))
                } else {
                    Poll::Ready(None)
                }
            }
        }
    }
}

#[cxx::bridge]
/// Generated by `cxx.rs`.
///
/// Mid-level wrapper of `libnixstore` implemented in C++.
mod ffi {
    extern "Rust" {
        type AsyncWriteSender;
        fn send(self: &mut AsyncWriteSender, data: &[u8]) -> Result<()>;
        fn eof(self: &mut AsyncWriteSender) -> Result<()>;
    }

    unsafe extern "C++" {
        include!("attic/src/nix_store/bindings/nix.hpp");

        #[namespace = "nix"]
        type Hash = super::FfiHash;

        // =========
        // CNixStore
        // =========

        /// Mid-level wrapper for the Unix Domain Socket Nix Store.
        type CNixStore;

        /// Returns the path of the Nix store itself.
        fn store_dir(self: Pin<&mut CNixStore>) -> String;

        /*
        /// Verifies that a path is indeed in the Nix store, then return the base store path.
        ///
        /// Use parse_store_path instead.
        fn to_store_path(self: Pin<&mut CNixStore>, path: &str) -> Result<String>;
        */

        /// Queries information about a valid path.
        fn query_path_info(
            self: Pin<&mut CNixStore>,
            store_path: &[u8],
        ) -> Result<UniquePtr<CPathInfo>>;

        /// Computes the closure of a valid path.
        ///
        /// If `flip_directions` is true, the set of paths that can reach `store_path` is
        /// returned.
        fn compute_fs_closure(
            self: Pin<&mut CNixStore>,
            store_path: &[u8],
            flip_direction: bool,
            include_outputs: bool,
            include_derivers: bool,
        ) -> Result<UniquePtr<CxxVector<CxxString>>>;

        /// Computes the closure of a list of valid paths.
        ///
        /// This is the multi-path variant of `compute_fs_closure`.
        /// If `flip_directions` is true, the set of paths that can reach `store_path` is
        /// returned.
        ///
        /// It's easier and more efficient to just pass a vector of slices
        /// instead of wrangling with concrete "extern rust" / "extern C++"
        /// types.
        fn compute_fs_closure_multi(
            self: Pin<&mut CNixStore>,
            base_names: &[&[u8]],
            flip_direction: bool,
            include_outputs: bool,
            include_derivers: bool,
        ) -> Result<UniquePtr<CxxVector<CxxString>>>;

        /// Creates a NAR dump from a path.
        fn nar_from_path(
            self: Pin<&mut CNixStore>,
            base_name: Vec<u8>,
            sender: Box<AsyncWriteSender>,
        ) -> Result<()>;

        /// Obtains a handle to the Nix store.
        fn open_nix_store() -> Result<UniquePtr<CNixStore>>;

        // =========
        // CPathInfo
        // =========

        /// Mid-level wrapper for the `nix::ValidPathInfo` struct.
        type CPathInfo;

        /// Returns the NAR hash of the store path.
        fn nar_hash(self: Pin<&mut CPathInfo>) -> Hash;

        /// Returns the size of the NAR.
        fn nar_size(self: Pin<&mut CPathInfo>) -> u64;

        /// Returns the references of the store path.
        fn references(self: Pin<&mut CPathInfo>) -> UniquePtr<CxxVector<CxxString>>;

        /// Returns the possibly invalid signatures attached to the store path.
        fn sigs(self: Pin<&mut CPathInfo>) -> UniquePtr<CxxVector<CxxString>>;

        /// Returns the CA field of the store path.
        fn ca(self: Pin<&mut CPathInfo>) -> String;
    }
}
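
The `AsyncWriteAdapter`/`AsyncWriteSender` pair above bridges the synchronous C++ writer to async Rust through an unbounded channel: the C++ side pushes `Data` messages from a blocking context, and the async side drains them into any `AsyncWrite`. A self-contained sketch of the same technique, using local names and a plain `std::thread` as a stand-in for the C++ producer (not the types above):

```rust
use tokio::io::AsyncWriteExt;
use tokio::sync::mpsc;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let (tx, mut rx) = mpsc::unbounded_channel::<Vec<u8>>();

    // Stand-in for the synchronous C++ side: a blocking thread producing chunks.
    let producer = std::thread::spawn(move || {
        for chunk in [b"nar ".to_vec(), b"bytes".to_vec()] {
            tx.send(chunk).unwrap();
        }
        // Dropping `tx` closes the channel, playing the role of the Eof message.
    });

    // The async side drains the channel into an `AsyncWrite` (a Vec here).
    let mut out: Vec<u8> = Vec::new();
    while let Some(chunk) = rx.recv().await {
        out.write_all(&chunk).await?;
    }
    producer.join().unwrap();
    assert_eq!(out, b"nar bytes");
    Ok(())
}
```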
133
attic/src/nix_store/bindings/nix.cpp
Normal file
133
attic/src/nix_store/bindings/nix.cpp
Normal file
|
@ -0,0 +1,133 @@
|
||||||
|
// C++ side of the libnixstore glue.
//
// We implement a mid-level wrapper of the Nix Store interface,
// which is then wrapped again in the Rust side to enable full
// async-await operation.
//
// Here we stick with the naming conventions of Rust and handle
// Rust types directly where possible, so that the interfaces are
// satisfying to use from the Rust side via cxx.rs.

#include "attic/src/nix_store/bindings/nix.hpp"

static nix::StorePath store_path_from_rust(RBasePathSlice base_name) {
    std::string_view sv((const char *)base_name.data(), base_name.size());
    return nix::StorePath(sv);
}

// ========
// RustSink
// ========

RustSink::RustSink(RBox<AsyncWriteSender> sender) : sender(std::move(sender)) {}

void RustSink::operator () (std::string_view data) {
    RBasePathSlice s((const unsigned char *)data.data(), data.size());

    this->sender->send(s);
}

void RustSink::eof() {
    this->sender->eof();
}

// =========
// CPathInfo
// =========

CPathInfo::CPathInfo(nix::ref<const nix::ValidPathInfo> pi) : pi(pi) {}

nix::Hash CPathInfo::nar_hash() {
    return this->pi->narHash;
}

uint64_t CPathInfo::nar_size() {
    return this->pi->narSize;
}

std::unique_ptr<std::vector<std::string>> CPathInfo::sigs() {
    std::vector<std::string> result;
    for (auto&& elem : this->pi->sigs) {
        result.push_back(std::string(elem));
    }
    return std::make_unique<std::vector<std::string>>(result);
}

std::unique_ptr<std::vector<std::string>> CPathInfo::references() {
    std::vector<std::string> result;
    for (auto&& elem : this->pi->references) {
        result.push_back(std::string(elem.to_string()));
    }
    return std::make_unique<std::vector<std::string>>(result);
}

RString CPathInfo::ca() {
    if (this->pi->ca) {
        return RString(nix::renderContentAddress(this->pi->ca));
    } else {
        return RString("");
    }
}

// =========
// CNixStore
// =========

CNixStore::CNixStore() {
    std::map<std::string, std::string> params;
    this->store = nix::openStore("auto", params);
}

RString CNixStore::store_dir() {
    return RString(this->store->storeDir);
}

std::unique_ptr<CPathInfo> CNixStore::query_path_info(RBasePathSlice base_name) {
    auto store_path = store_path_from_rust(base_name);

    auto r = this->store->queryPathInfo(store_path);
    return std::make_unique<CPathInfo>(r);
}

std::unique_ptr<std::vector<std::string>> CNixStore::compute_fs_closure(RBasePathSlice base_name, bool flip_direction, bool include_outputs, bool include_derivers) {
    std::set<nix::StorePath> out;

    this->store->computeFSClosure(store_path_from_rust(base_name), out, flip_direction, include_outputs, include_derivers);

    std::vector<std::string> result;
    for (auto&& elem : out) {
        result.push_back(std::string(elem.to_string()));
    }
    return std::make_unique<std::vector<std::string>>(result);
}

std::unique_ptr<std::vector<std::string>> CNixStore::compute_fs_closure_multi(RSlice<const RBasePathSlice> base_names, bool flip_direction, bool include_outputs, bool include_derivers) {
    std::set<nix::StorePath> path_set, out;
    for (auto&& base_name : base_names) {
        path_set.insert(store_path_from_rust(base_name));
    }

    this->store->computeFSClosure(path_set, out, flip_direction, include_outputs, include_derivers);

    std::vector<std::string> result;
    for (auto&& elem : out) {
        result.push_back(std::string(elem.to_string()));
    }
    return std::make_unique<std::vector<std::string>>(result);
}

void CNixStore::nar_from_path(RVec<unsigned char> base_name, RBox<AsyncWriteSender> sender) {
    RustSink sink(std::move(sender));

    std::string_view sv((const char *)base_name.data(), base_name.size());
    nix::StorePath store_path(sv);

    // exceptions will be thrown into Rust
    this->store->narFromPath(store_path, sink);
    sink.eof();
}

std::unique_ptr<CNixStore> open_nix_store() {
    return std::make_unique<CNixStore>();
}
77
attic/src/nix_store/bindings/nix.hpp
Normal file
@ -0,0 +1,77 @@
// C++ side of the libnixstore glue.
//
// We implement a mid-level wrapper of the Nix Store interface,
// which is then wrapped again in the Rust side to enable full
// async-await operation.
//
// Here we stick with the naming conventions of Rust and handle
// Rust types directly where possible, so that the interfaces are
// satisfying to use from the Rust side via cxx.rs.

#pragma once
#include <iostream>
#include <memory>
#include <set>
#include <nix/store-api.hh>
#include <nix/local-store.hh>
#include <nix/remote-store.hh>
#include <nix/uds-remote-store.hh>
#include <nix/hash.hh>
#include <nix/path.hh>
#include <nix/serialise.hh>
#include <rust/cxx.h>

template<class T> using RVec = rust::Vec<T>;
template<class T> using RBox = rust::Box<T>;
template<class T> using RSlice = rust::Slice<T>;
using RString = rust::String;
using RStr = rust::Str;
using RBasePathSlice = RSlice<const unsigned char>;

struct AsyncWriteSender;

struct RustSink : nix::Sink
{
    RBox<AsyncWriteSender> sender;
public:
    RustSink(RBox<AsyncWriteSender> sender);
    void operator () (std::string_view data) override;
    void eof();
};

// Opaque wrapper for nix::ValidPathInfo
class CPathInfo {
    nix::ref<const nix::ValidPathInfo> pi;
public:
    CPathInfo(nix::ref<const nix::ValidPathInfo> pi);
    nix::Hash nar_hash();
    uint64_t nar_size();
    std::unique_ptr<std::vector<std::string>> sigs();
    std::unique_ptr<std::vector<std::string>> references();
    RString ca();
};

class CNixStore {
    std::shared_ptr<nix::Store> store;
public:
    CNixStore();

    RString store_dir();
    std::unique_ptr<CPathInfo> query_path_info(RBasePathSlice base_name);
    std::unique_ptr<std::vector<std::string>> compute_fs_closure(
        RBasePathSlice base_name,
        bool flip_direction,
        bool include_outputs,
        bool include_derivers);
    std::unique_ptr<std::vector<std::string>> compute_fs_closure_multi(
        RSlice<const RBasePathSlice> base_names,
        bool flip_direction,
        bool include_outputs,
        bool include_derivers);
    void nar_from_path(RVec<unsigned char> base_name, RBox<AsyncWriteSender> sender);
};

std::unique_ptr<CNixStore> open_nix_store();

// Relies on our definitions
#include "attic/src/nix_store/bindings/mod.rs.h"
298
attic/src/nix_store/mod.rs
Normal file
@ -0,0 +1,298 @@
//! Nix store operations.
//!
//! ## FFI Bindings
//!
//! For now, the FFI bindings are for use in the client. We never
//! interact with the Nix store on the server. When the `nix_store`
//! crate feature is disabled, native Rust portions of this module
//! will still function.
//!
//! We use `libnixstore` to carry out most of the operations.
//! To interface with `libnixstore`, we first construct a simpler,
//! FFI-friendly API in C++ and then integrate with it using [cxx](https://cxx.rs)
//! and [rust-bindgen](https://rust-lang.github.io/rust-bindgen).
//! The glue interface is mostly object-oriented, with no pesky
//! C-style OOP functions or manual lifetime tracking.
//!
//! The C++-side code is responsible for translating the calls
//! into actual `libnixstore` invocations which are version-specific
//! (we target Nix 2.4 and 2.5).
//!
//! We have the following goals:
//! - Retrieval of store path information
//! - Computation of closures
//! - Streaming of NAR archives
//! - Fully `async`/`await` API with support for concurrency
//!
//! ## Alternatives?
//!
//! The Nix source tree includes [`nix-rust`](https://github.com/NixOS/nix/tree/master/nix-rust)
//! which contains a limited implementation of various store operations.
//! It [used to](https://github.com/NixOS/nix/commit/bbe97dff8b3054d96e758f486f9ce3fa09e64de3)
//! contain an implementation of `StorePath` in Rust which was used from C++
//! via FFI. It was [removed](https://github.com/NixOS/nix/commit/759947bf72c134592f0ce23d385e48095bd0a301)
//! half a year later due to memory consumption concerns. The current
//! `nix-rust` contains a set of `libnixstore` bindings, but they are low-level
//! and suffering from bitrot.
//!
//! For easier FFI, there is an attempt to make a C wrapper for `libnixstore` called
//! [libnixstore-c](https://github.com/andir/libnixstore-c). It offers
//! a very limited amount of functionality.
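//!
//! ## Example
//!
//! A minimal usage sketch (not part of the original docs; assumes the
//! `nix_store` feature is enabled, a nix-daemon is reachable, and this
//! crate is available as `attic`):
//!
//! ```no_run
//! # async fn example() -> attic::error::AtticResult<()> {
//! use attic::nix_store::NixStore;
//!
//! let store = NixStore::connect()?;
//! let path = store
//!     .parse_store_path("/nix/store/ia70ss13m22znbl8khrf2hq72qmh5drr-ruby-2.7.5")?;
//! let info = store.query_path_info(path).await?;
//! println!("NAR size: {} bytes", info.nar_size);
//! # Ok(())
//! # }
//! ```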

#[cfg(feature = "nix_store")]
#[allow(unsafe_code)]
mod bindings;

#[cfg(feature = "nix_store")]
mod nix_store;

use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::path::{Path, PathBuf};

use lazy_static::lazy_static;
use regex::Regex;
use serde::{de, Deserialize, Serialize};

use crate::error::{AtticError, AtticResult};
use crate::hash::Hash;

#[cfg(feature = "nix_store")]
pub use bindings::{FfiHash, FfiHashType};

#[cfg(feature = "nix_store")]
pub use nix_store::NixStore;

#[cfg(test)]
pub mod tests;

/// Length of the hash in a store path.
pub const STORE_PATH_HASH_LEN: usize = 32;

/// Regex that matches a store path hash, without anchors.
pub const STORE_PATH_HASH_REGEX_FRAGMENT: &str = "[0123456789abcdfghijklmnpqrsvwxyz]{32}";

lazy_static! {
    /// Regex for a valid store path hash.
    ///
    /// This is the hash portion of a base name.
    static ref STORE_PATH_HASH_REGEX: Regex = {
        Regex::new(&format!("^{}$", STORE_PATH_HASH_REGEX_FRAGMENT)).unwrap()
    };

    /// Regex for a valid store base name.
    ///
    /// A base name consists of two parts: A hash and a human-readable
    /// label/name. The format of the hash is described in `StorePathHash`.
    ///
    /// The human-readable name can only contain the following characters:
    ///
    /// - A-Za-z0-9
    /// - `+-._?=`
    ///
    /// See the Nix implementation in `src/libstore/path.cc`.
    static ref STORE_BASE_NAME_REGEX: Regex = {
        Regex::new(r"^[0123456789abcdfghijklmnpqrsvwxyz]{32}-[A-Za-z0-9+-._?=]+$").unwrap()
    };
}

/// A path in a Nix store.
///
/// This must be a direct child of the store. This path may or
/// may not actually exist.
///
/// This guarantees that the base name is of valid format.
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
pub struct StorePath {
    /// Base name of the store path.
    ///
    /// For example, for `/nix/store/ia70ss13m22znbl8khrf2hq72qmh5drr-ruby-2.7.5`,
    /// this would be `ia70ss13m22znbl8khrf2hq72qmh5drr-ruby-2.7.5`.
    base_name: PathBuf,
}

/// A fixed-length store path hash.
///
/// For example, for `/nix/store/ia70ss13m22znbl8khrf2hq72qmh5drr-ruby-2.7.5`,
/// this would be `ia70ss13m22znbl8khrf2hq72qmh5drr`.
///
/// It must contain exactly 32 "base-32 characters". Nix's special scheme
/// includes the following valid characters: "0123456789abcdfghijklmnpqrsvwxyz"
/// ('e', 'o', 'u', 't' are banned).
///
/// Examples of invalid store path hashes:
///
/// - "eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"
/// - "IA70SS13M22ZNBL8KHRF2HQ72QMH5DRR"
/// - "whatevenisthisthing"
#[derive(Debug, Clone, Hash, PartialEq, Eq, Serialize)]
pub struct StorePathHash(String);

/// Information on a valid store path.
#[derive(Debug)]
pub struct ValidPathInfo {
    /// The store path.
    pub path: StorePath,

    /// Hash of the NAR.
    pub nar_hash: Hash,

    /// Size of the NAR.
    pub nar_size: u64,

    /// References.
    ///
    /// This list only contains base names of the paths.
    pub references: Vec<PathBuf>,

    /// Signatures.
    pub sigs: Vec<String>,

    /// Content Address.
    pub ca: Option<String>,
}

#[cfg_attr(not(feature = "nix_store"), allow(dead_code))]
impl StorePath {
    /// Creates a StorePath with a base name.
    fn from_base_name(base_name: PathBuf) -> AtticResult<Self> {
        let s = base_name
            .as_os_str()
            .to_str()
            .ok_or_else(|| AtticError::InvalidStorePathName {
                base_name: base_name.clone(),
                reason: "Name contains non-UTF-8 characters",
            })?;

        if !STORE_BASE_NAME_REGEX.is_match(s) {
            return Err(AtticError::InvalidStorePathName {
                base_name,
                reason: "Name is of invalid format",
            });
        }

        Ok(Self { base_name })
    }

    /// Creates a StorePath with a known valid base name.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the name is of a valid format (refer
    /// to the documentation for `STORE_BASE_NAME_REGEX`). Other operations
    /// with this object will assume it's valid.
    #[allow(unsafe_code)]
    unsafe fn from_base_name_unchecked(base_name: PathBuf) -> Self {
        Self { base_name }
    }

    /// Gets the hash portion of the store path.
    pub fn to_hash(&self) -> StorePathHash {
        // Safety: We have already validated the format of the base name,
        // including the hash part. The name is guaranteed valid UTF-8.
        #[allow(unsafe_code)]
        unsafe {
            let s = std::str::from_utf8_unchecked(self.base_name.as_os_str().as_bytes());
            let hash = s[..STORE_PATH_HASH_LEN].to_string();
            StorePathHash::new_unchecked(hash)
        }
    }

    /// Returns the human-readable name.
    pub fn name(&self) -> String {
        // Safety: Already checked
        #[allow(unsafe_code)]
        unsafe {
            let s = std::str::from_utf8_unchecked(self.base_name.as_os_str().as_bytes());
            s[STORE_PATH_HASH_LEN + 1..].to_string()
        }
    }

    pub fn as_os_str(&self) -> &OsStr {
        self.base_name.as_os_str()
    }

    #[cfg_attr(not(feature = "nix_store"), allow(dead_code))]
    fn as_base_name_bytes(&self) -> &[u8] {
        self.base_name.as_os_str().as_bytes()
    }
}

impl StorePathHash {
    /// Creates a store path hash from a string.
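    ///
    /// A short illustration (the hash is taken from the docs above; this
    /// example is a sketch, not part of the original file):
    ///
    /// ```no_run
    /// # use attic::nix_store::StorePathHash;
    /// let h = StorePathHash::new("ia70ss13m22znbl8khrf2hq72qmh5drr".to_string()).unwrap();
    /// assert_eq!("ia70ss13m22znbl8khrf2hq72qmh5drr", h.as_str());
    /// ```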
    pub fn new(hash: String) -> AtticResult<Self> {
        if hash.as_bytes().len() != STORE_PATH_HASH_LEN {
            return Err(AtticError::InvalidStorePathHash {
                hash,
                reason: "Hash is of invalid length",
            });
        }

        if !STORE_PATH_HASH_REGEX.is_match(&hash) {
            return Err(AtticError::InvalidStorePathHash {
                hash,
                reason: "Hash is of invalid format",
            });
        }

        Ok(Self(hash))
    }

    /// Creates a store path hash from a string, without checking its validity.
    ///
    /// # Safety
    ///
    /// The caller must make sure that it is of expected length and format.
    #[allow(unsafe_code)]
    pub unsafe fn new_unchecked(hash: String) -> Self {
        Self(hash)
    }

    pub fn as_str(&self) -> &str {
        &self.0
    }

    pub fn to_string(&self) -> String {
        self.0.clone()
    }
}

impl<'de> Deserialize<'de> for StorePathHash {
    /// Deserializes a potentially-invalid store path hash.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: de::Deserializer<'de>,
    {
        use de::Error;
        String::deserialize(deserializer)
            .and_then(|s| Self::new(s).map_err(|e| Error::custom(e.to_string())))
    }
}

/// Returns the base store name of a path relative to a store root.
#[cfg_attr(not(feature = "nix_store"), allow(dead_code))]
fn to_base_name(store_dir: &Path, path: &Path) -> AtticResult<PathBuf> {
    if let Ok(remaining) = path.strip_prefix(store_dir) {
        let first = remaining
            .iter()
            .next()
            .ok_or_else(|| AtticError::InvalidStorePath {
                path: path.to_owned(),
                reason: "Path is store directory itself",
            })?;

        if first.len() < STORE_PATH_HASH_LEN {
            Err(AtticError::InvalidStorePath {
                path: path.to_owned(),
                reason: "Path is too short",
            })
        } else {
            Ok(PathBuf::from(first))
        }
    } else {
        Err(AtticError::InvalidStorePath {
            path: path.to_owned(),
            reason: "Path is not in store directory",
        })
    }
}
236
attic/src/nix_store/nix_store.rs
Normal file
@ -0,0 +1,236 @@
//! High-level Nix Store interface.

use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::path::{Path, PathBuf};
use std::sync::Arc;

use tokio::task::spawn_blocking;

use super::bindings::{open_nix_store, AsyncWriteAdapter, FfiNixStore};
use super::{to_base_name, StorePath, ValidPathInfo};
use crate::error::AtticResult;

/// High-level wrapper for the Unix Domain Socket Nix Store.
pub struct NixStore {
    /// The Nix store FFI.
    inner: Arc<FfiNixStore>,

    /// Path to the Nix store itself.
    store_dir: PathBuf,
}

#[cfg(feature = "nix_store")]
impl NixStore {
    pub fn connect() -> AtticResult<Self> {
        #[allow(unsafe_code)]
        let inner = unsafe { open_nix_store()? };
        let store_dir = PathBuf::from(inner.store().store_dir());

        Ok(Self {
            inner: Arc::new(inner),
            store_dir,
        })
    }

    /// Returns the Nix store directory.
    pub fn store_dir(&self) -> &Path {
        &self.store_dir
    }

    /// Returns the base store path of a path, following any symlinks.
    ///
    /// This is a simple wrapper over `parse_store_path` that also
    /// follows symlinks.
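    ///
    /// A hedged sketch of a call (the path is illustrative; this example is
    /// not part of the original file):
    ///
    /// ```no_run
    /// # fn example(store: &attic::nix_store::NixStore) -> attic::error::AtticResult<()> {
    /// // Follows the /run/current-system symlink to the underlying store path.
    /// let sp = store.follow_store_path("/run/current-system")?;
    /// eprintln!("name: {}", sp.name());
    /// # Ok(())
    /// # }
    /// ```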
    pub fn follow_store_path<P: AsRef<Path>>(&self, path: P) -> AtticResult<StorePath> {
        // Some cases to consider:
        //
        // - `/nix/store/eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee-nixos-system-x/sw` (a symlink to sw)
        //   - `eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee-nixos-system-x`
        //   - We don't resolve the `sw` symlink since the full store path is specified
        //     (this is a design decision)
        // - `/run/current-system` (a symlink to profile)
        //   - `eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee-nixos-system-x`
        // - `/run/current-system/` (with a trailing slash)
        //   - `eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee-nixos-system-x`
        // - `/run/current-system/sw` (a symlink to sw)
        //   - `eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee-system-path` (!)
        let path = path.as_ref();
        if path.strip_prefix(&self.store_dir).is_ok() {
            // Is in the store - directly strip regardless of being a symlink or not
            self.parse_store_path(path)
        } else {
            // Canonicalize then parse
            let canon = path.canonicalize()?;
            self.parse_store_path(canon)
        }
    }

    /// Returns the base store path of a path.
    ///
    /// This function does not validate whether the path is actually in the
    /// Nix store or not.
    ///
    /// The path must be under the store directory. See `follow_store_path`
    /// for an alternative that follows symlinks.
    pub fn parse_store_path<P: AsRef<Path>>(&self, path: P) -> AtticResult<StorePath> {
        let base_name = to_base_name(&self.store_dir, path.as_ref())?;
        StorePath::from_base_name(base_name)
    }

    /// Returns the full path for a base store path.
    pub fn get_full_path(&self, store_path: &StorePath) -> PathBuf {
        self.store_dir.join(&store_path.base_name)
    }

    /// Creates a NAR archive from a path.
    ///
    /// This is akin to `nix-store --dump`.
    pub fn nar_from_path(&self, store_path: StorePath) -> AsyncWriteAdapter {
        let inner = self.inner.clone();
        let (adapter, mut sender) = AsyncWriteAdapter::new();
        let base_name = Vec::from(store_path.as_base_name_bytes());

        spawn_blocking(move || {
            // Send all exceptions through the channel, and ignore errors
            // during sending (the channel may have been closed).
            if let Err(e) = inner.store().nar_from_path(base_name, sender.clone()) {
                let _ = sender.rust_error(e);
            }
        });

        adapter
    }

    /// Returns the closure of a valid path.
    ///
    /// If `flip_directions` is true, the set of paths that can reach `store_path` is
    /// returned.
    pub async fn compute_fs_closure(
        &self,
        store_path: StorePath,
        flip_directions: bool,
        include_outputs: bool,
        include_derivers: bool,
    ) -> AtticResult<Vec<StorePath>> {
        let inner = self.inner.clone();

        spawn_blocking(move || {
            let base_name = store_path.as_base_name_bytes();

            let cxx_vector = inner.store().compute_fs_closure(
                base_name,
                flip_directions,
                include_outputs,
                include_derivers,
            )?;

            Ok(cxx_vector
                .iter()
                .map(|s| {
                    let osstr = OsStr::from_bytes(s.as_bytes());
                    let pb = PathBuf::from(osstr);

                    // Safety: The C++ implementation already checks the StorePath
                    // for correct format (which also implies valid UTF-8)
                    #[allow(unsafe_code)]
                    unsafe {
                        StorePath::from_base_name_unchecked(pb)
                    }
                })
                .collect())
        })
        .await
        .unwrap()
    }

    /// Returns the closure of a set of valid paths.
    ///
    /// This is the multi-path variant of `compute_fs_closure`.
    /// If `flip_directions` is true, the set of paths that can reach `store_paths` is
    /// returned.
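    ///
    /// A hedged sketch of a call (the store path is illustrative; this
    /// example is not part of the original file):
    ///
    /// ```no_run
    /// # async fn example(store: &attic::nix_store::NixStore) -> attic::error::AtticResult<()> {
    /// let roots = vec![
    ///     store.parse_store_path("/nix/store/ia70ss13m22znbl8khrf2hq72qmh5drr-ruby-2.7.5")?,
    /// ];
    /// let closure = store.compute_fs_closure_multi(roots, false, false, false).await?;
    /// eprintln!("{} paths in closure", closure.len());
    /// # Ok(())
    /// # }
    /// ```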
    pub async fn compute_fs_closure_multi(
        &self,
        store_paths: Vec<StorePath>,
        flip_directions: bool,
        include_outputs: bool,
        include_derivers: bool,
    ) -> AtticResult<Vec<StorePath>> {
        let inner = self.inner.clone();

        spawn_blocking(move || {
            let plain_base_names: Vec<&[u8]> = store_paths
                .iter()
                .map(|sp| sp.as_base_name_bytes())
                .collect();

            let cxx_vector = inner.store().compute_fs_closure_multi(
                &plain_base_names,
                flip_directions,
                include_outputs,
                include_derivers,
            )?;

            Ok(cxx_vector
                .iter()
                .map(|s| {
                    let osstr = OsStr::from_bytes(s.as_bytes());
                    let pb = PathBuf::from(osstr);

                    // Safety: The C++ implementation already checks the StorePath
                    // for correct format (which also implies valid UTF-8)
                    #[allow(unsafe_code)]
                    unsafe {
                        StorePath::from_base_name_unchecked(pb)
                    }
                })
                .collect())
        })
        .await
        .unwrap()
    }

    /// Returns detailed information on a path.
    pub async fn query_path_info(&self, store_path: StorePath) -> AtticResult<ValidPathInfo> {
        let inner = self.inner.clone();

        spawn_blocking(move || {
            let base_name = store_path.as_base_name_bytes();
            let mut c_path_info = inner.store().query_path_info(base_name)?;

            // FIXME: Make this more ergonomic and efficient
            let nar_size = c_path_info.pin_mut().nar_size();
            let nar_hash = c_path_info.pin_mut().nar_hash();
            let references = c_path_info
                .pin_mut()
                .references()
                .iter()
                .map(|s| {
                    let osstr = OsStr::from_bytes(s.as_bytes());
                    PathBuf::from(osstr)
                })
                .collect();
            let sigs = c_path_info
                .pin_mut()
                .sigs()
                .iter()
                .map(|s| {
                    let osstr = OsStr::from_bytes(s.as_bytes());
                    osstr.to_str().unwrap().to_string()
                })
                .collect();
            let ca = c_path_info.pin_mut().ca();

            Ok(ValidPathInfo {
                path: store_path,
                nar_size,
                nar_hash: nar_hash.into_rust()?,
                references,
                sigs,
                ca: if ca.is_empty() { None } else { Some(ca) },
            })
        })
        .await
        .unwrap()
    }
}
1
attic/src/nix_store/tests/.gitattributes
vendored
Normal file
@ -0,0 +1 @@
* -text
14
attic/src/nix_store/tests/README.md
Normal file
@ -0,0 +1,14 @@
# Tests

The included tests require trusted user access to import the test NAR dumps.

## Test Derivations

To keep things minimal, we have a couple of polyglot derivations that double as their builders in `drv`.
They result in the following store paths when built:

- `no-deps.nix` -> `/nix/store/nm1w9sdm6j6icmhd2q3260hl1w9zj6li-attic-test-no-deps`
- `with-deps.nix` -> `/nix/store/7wp86qa87v2pwh6sr2a02qci0h71rs9z-attic-test-with-deps`

NAR dumps for those store paths are included in `nar`.
`.nar` files are produced by `nix-store --dump`, and `.export` files are produced by `nix-store --export`.
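
For reference, a dump can be regenerated from a built store path roughly like
this (a sketch; substitute the store paths listed above):

```console
$ nix-store --dump /nix/store/nm1w9sdm6j6icmhd2q3260hl1w9zj6li-attic-test-no-deps > nar/nm1w9sdm6j6icmhd2q3260hl1w9zj6li-attic-test-no-deps.nar
$ nix-store --export /nix/store/nm1w9sdm6j6icmhd2q3260hl1w9zj6li-attic-test-no-deps > nar/nm1w9sdm6j6icmhd2q3260hl1w9zj6li-attic-test-no-deps.export
```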
7
attic/src/nix_store/tests/drv/no-deps.nix
Executable file
@ -0,0 +1,7 @@
#!/bin/sh
/*/sh -c "echo Hi! I have no dependencies. > $out"; exit 0; */
derivation {
  name = "attic-test-no-deps";
  builder = ./no-deps.nix;
  system = "x86_64-linux";
}
21
attic/src/nix_store/tests/drv/with-deps.nix
Executable file
@ -0,0 +1,21 @@
#!/bin/sh
/*/sh -c "echo Hi! I depend on $dep. > $out"; exit 0; */
let
  a = derivation {
    name = "attic-test-with-deps-a";
    builder = ./with-deps.nix;
    system = "x86_64-linux";
    dep = b;
  };
  b = derivation {
    name = "attic-test-with-deps-b";
    builder = ./with-deps.nix;
    system = "x86_64-linux";
    dep = c;
  };
  c = derivation {
    name = "attic-test-with-deps-c-final";
    builder = ./with-deps.nix;
    system = "x86_64-linux";
  };
in a
255
attic/src/nix_store/tests/mod.rs
Normal file
@ -0,0 +1,255 @@
use super::*;

use std::collections::HashSet;
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::process::Command;

use serde::de::DeserializeOwned;
use tokio_test::block_on;

pub mod test_nar;

fn connect() -> NixStore {
    NixStore::connect().expect("Failed to connect to the Nix store")
}

/// Evaluates a Nix expression using the command-line interface.
fn cli_eval<T>(expression: &str) -> T
where
    T: DeserializeOwned,
{
    let cli = Command::new("nix-instantiate")
        .args(["--eval", "--json", "-E", expression])
        .output()
        .expect("Failed to evaluate");

    if !cli.status.success() {
        panic!("Evaluation of '{}' failed: {:?}", expression, cli.status);
    }

    let json = std::str::from_utf8(&cli.stdout).expect("Result not valid UTF-8");

    serde_json::from_str(json).expect("Failed to parse output")
}

fn assert_base_name(store: &str, path: &str, expected: &str) {
    let expected = PathBuf::from(expected);

    assert_eq!(
        expected,
        to_base_name(store.as_ref(), path.as_ref()).unwrap(),
    );
}

fn assert_base_name_err(store: &str, path: &str, err: &str) {
    let e = to_base_name(store.as_ref(), path.as_ref()).unwrap_err();

    if let AtticError::InvalidStorePath { path: _, reason } = e {
        assert!(reason.contains(err));
    } else {
        panic!("to_base_name didn't return an InvalidStorePath");
    }
}

#[test]
fn test_connect() {
    connect();
}

#[test]
fn test_store_dir() {
    let store = connect();
    let expected: PathBuf = cli_eval("builtins.storeDir");
    assert_eq!(store.store_dir(), expected);
}

#[test]
fn test_to_base_name() {
    assert_base_name(
        "/nix/store",
        "/nix/store/3iq73s1p4mh4mrflj2k1whkzsimxf0l7-firefox-91.0",
        "3iq73s1p4mh4mrflj2k1whkzsimxf0l7-firefox-91.0",
    );
    assert_base_name(
        "/gnu/store",
        "/gnu/store/3iq73s1p4mh4mrflj2k1whkzsimxf0l7-firefox-91.0/",
        "3iq73s1p4mh4mrflj2k1whkzsimxf0l7-firefox-91.0",
    );
    assert_base_name(
        "/nix/store",
        "/nix/store/3iq73s1p4mh4mrflj2k1whkzsimxf0l7-firefox-91.0/bin/firefox",
        "3iq73s1p4mh4mrflj2k1whkzsimxf0l7-firefox-91.0",
    );
    assert_base_name_err(
        "/gnu/store",
        "/nix/store/3iq73s1p4mh4mrflj2k1whkzsimxf0l7-firefox-91.0",
        "Path is not in store directory",
    );
    assert_base_name_err("/nix/store", "/nix/store", "Path is store directory itself");
    assert_base_name_err(
        "/nix/store",
        "/nix/store/",
        "Path is store directory itself",
    );
    assert_base_name_err("/nix/store", "/nix/store/tooshort", "Path is too short");
}

#[test]
fn test_base_name() {
    let bn = PathBuf::from("ia70ss13m22znbl8khrf2hq72qmh5drr-ruby-2.7.5");
    StorePath::from_base_name(bn).unwrap();

    // name has invalid UTF-8
    let osstr = OsStr::from_bytes(b"ia70ss13m22znbl8khrf2hq72qmh5drr-\xc3");
    let bn = PathBuf::from(osstr);
    StorePath::from_base_name(bn).unwrap_err();

    // hash has bad characters
    let bn = PathBuf::from("eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee-ruby-2.7.5");
    StorePath::from_base_name(bn).unwrap_err();

    // name has bad characters
    let bn = PathBuf::from("ia70ss13m22znbl8khrf2hq72qmh5drr-shocking!!!");
    StorePath::from_base_name(bn).unwrap_err();

    // name portion empty
    let bn = PathBuf::from("ia70ss13m22znbl8khrf2hq72qmh5drr-");
    StorePath::from_base_name(bn).unwrap_err();

    // no name portion
    let bn = PathBuf::from("ia70ss13m22znbl8khrf2hq72qmh5drr");
    StorePath::from_base_name(bn).unwrap_err();

    // too short
    let bn = PathBuf::from("ia70ss13m22znbl8khrf2hq");
    StorePath::from_base_name(bn).unwrap_err();
}

#[test]
fn test_store_path_hash() {
    // valid base-32 hash
    let h = "ia70ss13m22znbl8khrf2hq72qmh5drr".to_string();
    StorePathHash::new(h).unwrap();

    // invalid characters
    let h = "eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee".to_string();
    StorePathHash::new(h).unwrap_err();
    let h = "IA70SS13M22ZNBL8KHRF2HQ72QMH5DRR".to_string();
    StorePathHash::new(h).unwrap_err();

    // too short
    let h = "ia70ss13m22znbl8khrf2hq".to_string();
    StorePathHash::new(h).unwrap_err();
}

#[test]
fn test_nar_streaming() {
    let store = NixStore::connect().expect("Failed to connect to the Nix store");

    block_on(async move {
        let test_nar = test_nar::NO_DEPS;
        test_nar.import().await.expect("Could not import test NAR");

        let target = test_nar.get_target().expect("Could not create dump target");
        let writer = target.get_writer().await.expect("Could not get writer");

        let store_path = store.parse_store_path(test_nar.path()).unwrap();

        let stream = store.nar_from_path(store_path);
        stream.write_all(writer).await.unwrap();

        target
            .validate()
            .await
            .expect("Could not validate resulting dump");
    });
}

#[test]
fn test_compute_fs_closure() {
    let store = NixStore::connect().expect("Failed to connect to the Nix store");

    block_on(async move {
        use test_nar::{WITH_DEPS_A, WITH_DEPS_B, WITH_DEPS_C};

        for nar in [WITH_DEPS_C, WITH_DEPS_B, WITH_DEPS_A] {
            nar.import().await.expect("Could not import test NAR");

            let path = store
                .parse_store_path(nar.path())
                .expect("Could not parse store path");

            let actual: HashSet<StorePath> = store
                .compute_fs_closure(path, false, false, false)
                .await
                .expect("Could not compute closure")
                .into_iter()
                .collect();

            assert_eq!(nar.closure(), actual);
        }
    });
}

#[test]
fn test_compute_fs_closure_multi() {
    let store = NixStore::connect().expect("Failed to connect to the Nix store");

    block_on(async move {
        use test_nar::{NO_DEPS, WITH_DEPS_A, WITH_DEPS_B, WITH_DEPS_C};

        for nar in [NO_DEPS, WITH_DEPS_C, WITH_DEPS_B, WITH_DEPS_A] {
            nar.import().await.expect("Could not import test NAR");
        }

        let mut expected = NO_DEPS.closure();
        expected.extend(WITH_DEPS_A.closure());

        let paths = vec![
            store.parse_store_path(WITH_DEPS_A.path()).unwrap(),
            store.parse_store_path(NO_DEPS.path()).unwrap(),
        ];

        let actual: HashSet<StorePath> = store
            .compute_fs_closure_multi(paths, false, false, false)
            .await
            .expect("Could not compute closure")
            .into_iter()
            .collect();

        eprintln!("Closure: {:#?}", actual);

        assert_eq!(expected, actual);
    });
}

#[test]
fn test_query_path_info() {
    let store = NixStore::connect().expect("Failed to connect to the Nix store");

    block_on(async move {
        use test_nar::{WITH_DEPS_B, WITH_DEPS_C};

        for nar in [WITH_DEPS_C, WITH_DEPS_B] {
            nar.import().await.expect("Could not import test NAR");
        }

        let nar = WITH_DEPS_B;
        let path = store.parse_store_path(nar.path()).unwrap();
        let path_info = store
            .query_path_info(path)
            .await
            .expect("Could not query path info");

        eprintln!("Path info: {:?}", path_info);

        assert_eq!(nar.nar().len() as u64, path_info.nar_size);
        assert_eq!(
            vec![PathBuf::from(
                "3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final"
            ),],
            path_info.references
        );
    });
}
@ -0,0 +1 @@
Hi! I depend on .
Binary file not shown.
Binary file not shown.
@ -0,0 +1 @@
Hi! I depend on /nix/store/3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final.
Binary file not shown.
Binary file not shown.
@ -0,0 +1 @@
Hi! I depend on /nix/store/544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b.
Binary file not shown.
Binary file not shown.
@ -0,0 +1 @@
Hi! I have no dependencies.
Binary file not shown.
Binary file not shown.
245
attic/src/nix_store/tests/test_nar.rs
Normal file
@ -0,0 +1,245 @@
//! Utilities for testing the NAR dump functionality.

use std::collections::HashSet;
use std::io;
use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::process::Stdio;
use std::sync::Arc;
use std::task::{Context, Poll};

use tempfile::NamedTempFile;
use tokio::fs::{File, OpenOptions};
use tokio::io::{AsyncReadExt, AsyncWrite, AsyncWriteExt};
use tokio::process::Command;

use crate::error::AtticResult;
use crate::nix_store::StorePath;

/// Expected values for `nm1w9sdm6j6icmhd2q3260hl1w9zj6li-attic-test-no-deps`.
pub const NO_DEPS: TestNar = TestNar {
    store_path: "/nix/store/nm1w9sdm6j6icmhd2q3260hl1w9zj6li-attic-test-no-deps",
    original_file: include_bytes!("nar/nm1w9sdm6j6icmhd2q3260hl1w9zj6li-attic-test-no-deps"),
    nar: include_bytes!("nar/nm1w9sdm6j6icmhd2q3260hl1w9zj6li-attic-test-no-deps.nar"),
    export: include_bytes!("nar/nm1w9sdm6j6icmhd2q3260hl1w9zj6li-attic-test-no-deps.export"),
    closure: &["nm1w9sdm6j6icmhd2q3260hl1w9zj6li-attic-test-no-deps"],
};

/// Expected values for `n7q4i7rlmbk4xz8qdsxpm6jbhrnxraq2-attic-test-with-deps-a`.
///
/// This depends on `544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b` as well
/// as `3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final`.
pub const WITH_DEPS_A: TestNar = TestNar {
    store_path: "/nix/store/n7q4i7rlmbk4xz8qdsxpm6jbhrnxraq2-attic-test-with-deps-a",
    original_file: include_bytes!("nar/n7q4i7rlmbk4xz8qdsxpm6jbhrnxraq2-attic-test-with-deps-a"),
    nar: include_bytes!("nar/n7q4i7rlmbk4xz8qdsxpm6jbhrnxraq2-attic-test-with-deps-a.nar"),
    export: include_bytes!("nar/n7q4i7rlmbk4xz8qdsxpm6jbhrnxraq2-attic-test-with-deps-a.export"),
    closure: &[
        "n7q4i7rlmbk4xz8qdsxpm6jbhrnxraq2-attic-test-with-deps-a",
        "544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b",
        "3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final",
    ],
};

/// Expected values for `544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b`.
///
/// This depends on `3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final`.
pub const WITH_DEPS_B: TestNar = TestNar {
    store_path: "/nix/store/544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b",
    original_file: include_bytes!("nar/544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b"),
    nar: include_bytes!("nar/544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b.nar"),
    export: include_bytes!("nar/544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b.export"),
    closure: &[
        "544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b",
        "3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final",
    ],
};

/// Expected values for `3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final`.
pub const WITH_DEPS_C: TestNar = TestNar {
    store_path: "/nix/store/3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final",
    original_file: include_bytes!(
        "nar/3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final"
    ),
    nar: include_bytes!("nar/3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final.nar"),
    export: include_bytes!(
        "nar/3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final.export"
    ),
    closure: &["3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final"],
};

/// A test NAR.
#[derive(Debug, Clone)]
pub struct TestNar {
    /// Full path in the Nix Store when imported.
    store_path: &'static str,

    /// The original file.
    original_file: &'static [u8],

    /// A NAR dump without path metadata.
    nar: &'static [u8],

    /// An importable NAR dump produced by `nix-store --export`.
    export: &'static [u8],

    /// The expected closure.
    closure: &'static [&'static str],
}

/// A target that can receive and verify a NAR dump.
pub struct NarDump {
    /// The produced NAR dump.
    actual: NamedTempFile,

    /// The expected values.
    expected: TestNar,
}

pub struct NarDumpWriter {
    file: File,
    _lifetime: Arc<NarDump>,
}

impl TestNar {
    /// Attempts to import the NAR into the local Nix Store.
    ///
    /// This requires the current user to be trusted by the nix-daemon.
    pub async fn import(&self) -> io::Result<()> {
        let mut child = Command::new("nix-store")
            .arg("--import")
            .stdin(Stdio::piped())
            .stdout(Stdio::piped())
            .spawn()?;

        let mut stdin = child.stdin.take().unwrap();
        stdin.write_all(self.export).await?;
        drop(stdin);

        let output = child.wait_with_output().await?;
        if !output.status.success() {
            let e = format!("Nix exited with code {:?}", output.status.code());
            return Err(io::Error::new(io::ErrorKind::Other, e));
        }

        // ensure that we imported the correct thing
        let store_path = String::from_utf8_lossy(&output.stdout);
        let store_path = store_path.trim_end();
        if store_path != self.store_path {
            let e = format!(
                "Import resulted in \"{}\", but we want \"{}\"",
                store_path, self.store_path
            );
            return Err(io::Error::new(io::ErrorKind::Other, e));
        }

        Ok(())
    }

    /// Returns the full store path that will be present when imported.
    pub fn path(&self) -> &Path {
        Path::new(self.store_path)
    }

    /// Returns the closure of the store path.
    pub fn closure(&self) -> HashSet<StorePath> {
        self.closure
            .iter()
            .map(|bp| {
                let bp = PathBuf::from(bp);
                StorePath::from_base_name(bp)
            })
            .collect::<AtticResult<HashSet<StorePath>>>()
            .unwrap()
    }

    /// Returns the raw expected NAR.
    pub fn nar(&self) -> &[u8] {
        self.nar
    }

    /// Creates a new test target.
    pub fn get_target(&self) -> io::Result<Arc<NarDump>> {
        let target = NarDump::new(self.clone())?;
        Ok(Arc::new(target))
    }
}

impl NarDump {
    /// Creates a new dump target.
    fn new(expected: TestNar) -> io::Result<Self> {
        Ok(Self {
            actual: NamedTempFile::new()?,
            expected,
        })
    }

    /// Returns a handle to write to the buffer.
    pub async fn get_writer(self: &Arc<Self>) -> io::Result<Box<NarDumpWriter>> {
        let file = OpenOptions::new()
            .read(false)
            .write(true)
            .open(self.actual.path())
            .await?;

        Ok(Box::new(NarDumpWriter {
            file,
            _lifetime: self.clone(),
        }))
    }

    /// Validates the resulting dump against expected values.
    pub async fn validate(&self) -> io::Result<()> {
        let mut file = File::open(self.actual.path()).await?;

        let metadata = file.metadata().await?;
        if metadata.len() != self.expected.nar.len() as u64 {
            let e = format!(
                "Length mismatch - Got {}, should be {}",
                metadata.len(),
                self.expected.nar.len()
            );
            return Err(io::Error::new(io::ErrorKind::InvalidData, e));
        }

        let mut bytes = Vec::new();
        file.read_to_end(&mut bytes).await?;
        if bytes != self.expected.nar {
            assert_eq!(bytes.len(), self.expected.nar.len());

            for i in 0..bytes.len() {
                if bytes[i] != self.expected.nar[i] {
                    eprintln!(
                        "Byte {} mismatch - We got {}, should be {}",
                        i, bytes[i], self.expected.nar[i]
                    );
                }
            }

            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "Content mismatch",
            ));
        }

        Ok(())
    }
}

impl AsyncWrite for NarDumpWriter {
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        Pin::new(&mut self.file).poll_write(cx, buf)
    }

    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        Pin::new(&mut self.file).poll_flush(cx)
    }

    fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        Pin::new(&mut self.file).poll_shutdown(cx)
    }
}
271
attic/src/signing/mod.rs
Normal file
@ -0,0 +1,271 @@
//! Object Signing and Verification.
//!
//! Nix utilizes Ed25519 to generate signatures on NAR hashes. Currently
//! we can either generate signatures on the fly per request, or cache them
//! in the data store.
//!
//! ## String format
//!
//! All signing-related strings in Nix follow the same format (henceforth
//! "the canonical format"):
//!
//! ```text
//! {keyName}:{base64Payload}
//! ```
//!
//! We follow the same format, so keys generated using the Nix CLI will
//! simply work.
//!
//! ## Serde
//!
//! `Serialize` and `Deserialize` are implemented to convert the structs
//! from and to the canonical format.
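//!
//! ## Example
//!
//! A minimal round-trip sketch (not part of the original docs; assumes
//! this crate is available as `attic`):
//!
//! ```no_run
//! # fn example() -> attic::error::AtticResult<()> {
//! use attic::signing::NixKeypair;
//!
//! let keypair = NixKeypair::generate("attic-test")?;
//! let signature = keypair.sign(b"hello");   // "attic-test:{base64}"
//! keypair.verify(b"hello", &signature)?;
//!
//! // The public key alone can also verify.
//! let public = keypair.to_public_key();
//! public.verify(b"hello", &signature)?;
//! # Ok(())
//! # }
//! ```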

use std::convert::TryInto;

use serde::{de, ser, Deserialize, Serialize};

use base64::DecodeError;
use displaydoc::Display;
use ed25519_compact::{Error as SignatureError, KeyPair, PublicKey, Signature};

use crate::error::AtticResult;

#[cfg(test)]
mod tests;

/// An ed25519 keypair for signing.
#[derive(Debug)]
pub struct NixKeypair {
    /// Name of this key.
    name: String,

    /// The keypair.
    keypair: KeyPair,
}

/// An ed25519 public key for verification.
#[derive(Debug, Clone)]
pub struct NixPublicKey {
    /// Name of this key.
    name: String,

    /// The public key.
    public: PublicKey,
}

/// A signing error.
#[derive(Debug, Display)]
#[ignore_extra_doc_attributes]
pub enum Error {
    /// Signature error: {0}
    SignatureError(SignatureError),

    /// The string has a wrong key name attached to it: Our name is "{our_name}" and the string has "{string_name}"
    WrongKeyName {
        our_name: String,
        string_name: String,
    },

    /// The string lacks a colon separator.
    NoColonSeparator,

    /// The name portion of the string is blank.
    BlankKeyName,

    /// The payload portion of the string is blank.
    BlankPayload,

    /// Base64 decode error: {0}
    Base64DecodeError(DecodeError),

    /// Invalid base64 payload length: Expected {expected} ({usage}), got {actual}
    InvalidPayloadLength {
        expected: usize,
        actual: usize,
        usage: &'static str,
    },

    /// Invalid signing key name "{0}".
    ///
    /// A valid name cannot be empty and must not contain colons (:).
    InvalidSigningKeyName(String),
}

impl NixKeypair {
    /// Generates a new keypair.
    pub fn generate(name: &str) -> AtticResult<Self> {
        // TODO: Make this configurable?
        let keypair = KeyPair::generate();

        validate_name(name)?;

        Ok(Self {
            name: name.to_string(),
            keypair,
        })
    }

    /// Imports an existing keypair from its canonical representation.
    pub fn from_str(keypair: &str) -> AtticResult<Self> {
        let (name, bytes) = decode_string(keypair, "keypair", KeyPair::BYTES, None)?;

        let keypair = KeyPair::from_slice(&bytes).map_err(Error::SignatureError)?;

        Ok(Self {
            name: name.to_string(),
            keypair,
        })
    }

    /// Returns the canonical representation of the keypair.
    ///
    /// This results in a 64-byte base64 payload that contains both the private
    /// key and the public key, in that order.
    ///
    /// For example, it can look like:
    /// attic-test:msdoldbtlongtt0/xkzmcbqihd7yvy8iomajqhnkutsl3b1pyyyc0mgg2rs0ttzzuyuk9rb2zphvtpes71mlha==
    pub fn export_keypair(&self) -> String {
        format!("{}:{}", self.name, base64::encode(*self.keypair))
    }

    /// Returns the canonical representation of the public key.
    ///
    /// For example, it can look like:
    /// attic-test:C929acssgtJoINkUtLbc81GFJPUW9maR77TxEu9ZpRw=
    pub fn export_public_key(&self) -> String {
        format!("{}:{}", self.name, base64::encode(*self.keypair.pk))
    }

    /// Returns the public key portion of the keypair.
    pub fn to_public_key(&self) -> NixPublicKey {
        NixPublicKey {
            name: self.name.clone(),
            public: self.keypair.pk,
        }
    }

    /// Signs a message, returning its canonical representation.
    pub fn sign(&self, message: &[u8]) -> String {
        let bytes = self.keypair.sk.sign(message, None);
        format!("{}:{}", self.name, base64::encode(bytes))
    }

    /// Verifies a message.
    pub fn verify(&self, message: &[u8], signature: &str) -> AtticResult<()> {
        let (_, bytes) = decode_string(signature, "signature", Signature::BYTES, Some(&self.name))?;

        let bytes: [u8; Signature::BYTES] = bytes.try_into().unwrap();
        let signature = Signature::from_slice(&bytes).map_err(Error::SignatureError)?;

        self.keypair
            .pk
            .verify(message, &signature)
            .map_err(|e| Error::SignatureError(e).into())
    }
}

impl<'de> Deserialize<'de> for NixKeypair {
    /// Deserializes a potentially-invalid Nix keypair from its canonical representation.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: de::Deserializer<'de>,
    {
        use de::Error;
        String::deserialize(deserializer)
            .and_then(|s| Self::from_str(&s).map_err(|e| Error::custom(e.to_string())))
    }
}

impl Serialize for NixKeypair {
    /// Serializes a Nix keypair to its canonical representation.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: ser::Serializer,
    {
        serializer.serialize_str(&self.export_keypair())
    }
}

impl NixPublicKey {
    /// Imports an existing public key from its canonical representation.
    pub fn from_str(public_key: &str) -> AtticResult<Self> {
        let (name, bytes) = decode_string(public_key, "public key", PublicKey::BYTES, None)?;

        let public = PublicKey::from_slice(&bytes).map_err(Error::SignatureError)?;

        Ok(Self {
            name: name.to_string(),
            public,
        })
    }

    /// Returns the Nix-compatible textual representation of the public key.
    ///
    /// For example, it can look like:
    /// attic-test:C929acssgtJoINkUtLbc81GFJPUW9maR77TxEu9ZpRw=
    pub fn export(&self) -> String {
        format!("{}:{}", self.name, base64::encode(*self.public))
    }

    /// Verifies a message.
    pub fn verify(&self, message: &[u8], signature: &str) -> AtticResult<()> {
        let (_, bytes) = decode_string(signature, "signature", Signature::BYTES, Some(&self.name))?;

        let bytes: [u8; Signature::BYTES] = bytes.try_into().unwrap();
        let signature = Signature::from_slice(&bytes).map_err(Error::SignatureError)?;

        self.public
            .verify(message, &signature)
            .map_err(|e| Error::SignatureError(e).into())
    }
}

/// Validates the name/label of a signing key.
///
/// A valid name cannot be empty and must not contain colons (:).
fn validate_name(name: &str) -> AtticResult<()> {
    if name.is_empty() || name.find(':').is_some() {
        Err(Error::InvalidSigningKeyName(name.to_string()).into())
    } else {
        Ok(())
    }
}

/// Decodes a colon-delimited string containing a key name and a base64 payload.
fn decode_string<'s>(
    s: &'s str,
    usage: &'static str,
    expected_payload_length: usize,
    expected_name: Option<&str>,
) -> AtticResult<(&'s str, Vec<u8>)> {
    let colon = s.find(':').ok_or(Error::NoColonSeparator)?;

    let (name, colon_and_payload) = s.split_at(colon);

    validate_name(name)?;

    // don't bother decoding base64 if the name doesn't match
    if let Some(expected_name) = expected_name {
        if expected_name != name {
            return Err(Error::WrongKeyName {
                our_name: expected_name.to_string(),
                string_name: name.to_string(),
            }
            .into());
        }
    }

    let bytes = base64::decode(&colon_and_payload[1..]).map_err(Error::Base64DecodeError)?;

    if bytes.len() != expected_payload_length {
        return Err(Error::InvalidPayloadLength {
            actual: bytes.len(),
            expected: expected_payload_length,
            usage,
        }
        .into());
    }

    Ok((name, bytes))
}
68 attic/src/signing/tests.rs Normal file
@@ -0,0 +1,68 @@
use super::*;

#[test]
fn test_generate_key() {
    let keypair = NixKeypair::generate("attic-test").expect("Could not generate key");

    let export_priv = keypair.export_keypair();
    let export_pub = keypair.export_public_key();

    eprintln!("Private key: {}", export_priv);
    eprintln!(" Public key: {}", export_pub);

    // re-import keypair
    let import = NixKeypair::from_str(&export_priv).expect("Could not re-import generated key");

    assert_eq!(keypair.name, import.name);
    assert_eq!(keypair.keypair, import.keypair);

    // re-import public key
    let import_pub = NixPublicKey::from_str(&export_pub).expect("Could not re-import public key");

    assert_eq!(keypair.name, import_pub.name);
    assert_eq!(keypair.keypair.pk, import_pub.public);

    // test the export functionality of NixPublicKey as well
    let export_pub2 = import_pub.export();
    let import_pub2 = NixPublicKey::from_str(&export_pub2).expect("Could not re-import public key");

    assert_eq!(keypair.name, import_pub2.name);
    assert_eq!(keypair.keypair.pk, import_pub2.public);
}

#[test]
fn test_serde() {
    let json = "\"attic-test:x326WFy/JUl+MQnN1u9NPdWQPBbcVn2mwoIqSLS3DmQqZ8qT8rBSxxEnyhtl3jDouBqodlyfq6F+HsVhbTYPMA==\"";

    let keypair: NixKeypair = serde_json::from_str(json).expect("Could not deserialize keypair");

    let export = serde_json::to_string(&keypair).expect("Could not serialize keypair");

    eprintln!("Public Key: {}", keypair.export_public_key());

    assert_eq!(json, &export);
}

#[test]
fn test_import_public_key() {
    let cache_nixos_org = "cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=";
    let import = NixPublicKey::from_str(cache_nixos_org).expect("Could not import public key");

    assert_eq!(cache_nixos_org, import.export());
}

#[test]
fn test_signing() {
    let keypair = NixKeypair::generate("attic-test").expect("Could not generate key");

    let public = keypair.to_public_key();

    let message = b"hello world";

    let signature = keypair.sign(message);

    keypair.verify(message, &signature).unwrap();
    public.verify(message, &signature).unwrap();

    keypair.verify(message, "attic-test:lo9EfNIL4eGRuNh7DTbAAffWPpI2SlYC/8uP7JnhgmfRIUNGhSbFe8qEaKN0mFS02TuhPpXFPNtRkFcCp0hGAQ==").unwrap_err();
}
110 attic/src/stream.rs Normal file
@@ -0,0 +1,110 @@
//! Stream utilities.

use std::marker::Unpin;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};

use digest::{Digest, Output as DigestOutput};
use tokio::io::{AsyncRead, ReadBuf};
use tokio::sync::OnceCell;

/// Stream filter that hashes the bytes that have been read.
///
/// The hash is finalized when EOF is reached.
pub struct StreamHasher<R: AsyncRead + Unpin, D: Digest + Unpin> {
    inner: R,
    digest: Option<D>,
    bytes_read: usize,
    finalized: Arc<OnceCell<(DigestOutput<D>, usize)>>,
}

impl<R: AsyncRead + Unpin, D: Digest + Unpin> StreamHasher<R, D> {
    pub fn new(inner: R, digest: D) -> (Self, Arc<OnceCell<(DigestOutput<D>, usize)>>) {
        let finalized = Arc::new(OnceCell::new());

        (
            Self {
                inner,
                digest: Some(digest),
                bytes_read: 0,
                finalized: finalized.clone(),
            },
            finalized,
        )
    }
}

impl<R: AsyncRead + Unpin, D: Digest + Unpin> AsyncRead for StreamHasher<R, D> {
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<tokio::io::Result<()>> {
        let old_filled = buf.filled().len();
        let r = Pin::new(&mut self.inner).poll_read(cx, buf);
        let read_len = buf.filled().len() - old_filled;

        match r {
            Poll::Ready(Ok(())) => {
                if read_len == 0 {
                    // EOF
                    if let Some(digest) = self.digest.take() {
                        self.finalized
                            .set((digest.finalize(), self.bytes_read))
                            .expect("Hash has already been finalized");
                    }
                } else {
                    // Read something
                    let digest = self.digest.as_mut().expect("Stream has data after EOF");

                    let filled = buf.filled();
                    digest.update(&filled[filled.len() - read_len..]);
                    self.bytes_read += read_len;
                }
            }
            Poll::Ready(Err(_)) => {
                assert!(read_len == 0);
            }
            Poll::Pending => {}
        }

        r
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    use tokio::io::AsyncReadExt;
    use tokio_test::block_on;

    #[test]
    fn test_stream_hasher() {
        let expected = b"hello world";
        let expected_sha256 =
            hex::decode("b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9")
                .unwrap();

        let (mut read, finalized) = StreamHasher::new(expected.as_slice(), sha2::Sha256::new());
        assert!(finalized.get().is_none());

        // force multiple reads
        let mut buf = vec![0u8; 100];
        let mut bytes_read = 0;
        bytes_read += block_on(read.read(&mut buf[bytes_read..bytes_read + 5])).unwrap();
        bytes_read += block_on(read.read(&mut buf[bytes_read..bytes_read + 5])).unwrap();
        bytes_read += block_on(read.read(&mut buf[bytes_read..bytes_read + 5])).unwrap();
        bytes_read += block_on(read.read(&mut buf[bytes_read..bytes_read + 5])).unwrap();

        assert_eq!(expected.len(), bytes_read);
        assert_eq!(expected, &buf[..bytes_read]);

        let (hash, count) = finalized.get().expect("Hash wasn't finalized");

        assert_eq!(expected_sha256.as_slice(), hash.as_slice());
        assert_eq!(expected.len(), *count);
        eprintln!("finalized = {:x?}", finalized);
    }
}
3 attic/src/testing/mod.rs Normal file
@@ -0,0 +1,3 @@
//! Utilities for testing.

pub mod shadow_store;
117 attic/src/testing/shadow_store/mod.rs Normal file
@@ -0,0 +1,117 @@
//! Shadow Nix store.
//!
//! Since Nix 2.0, Nix can use an alternative root for the store via
//! `--store` while keeping the same `storeDir`. To test pulling from
//! an Attic server with vanilla Nix, we create a temporary root
//! for the store, as well as `nix.conf` and `netrc` configurations
//! required to connect to an Attic server.
//!
//! ## Manual example
//!
//! ```bash
//! NIX_CONF_DIR="$SHADOW/etc/nix" NIX_USER_CONF_FILES="" NIX_REMOTE="" \
//!     nix-store --store "$SHADOW" -r /nix/store/h8fxhm945jlsfxlr4rvkkqlws771l07c-nix-2.7pre20220127_558c4ee -v
//! ```
//!
//! `nix.conf`:
//!
//! ```text
//! substituters = http://localhost:8080/attic-test
//! trusted-public-keys = attic-test:KmfKk/KwUscRJ8obZd4w6LgaqHZcn6uhfh7FYW02DzA=
//! ```
//!
//! `netrc`:
//!
//! ```text
//! machine localhost password eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjQwNzA5MDg4MDAsImh0dHBzOi8vemhhb2ZlbmdsaS5naXRodWIuaW8vYXR0aWMiOnsieC1hdHRpYy1hY2Nlc3MiOnsiY2FjaGVzIjp7IioiOnsicHVzaCI6dHJ1ZSwicHVsbCI6dHJ1ZX19fX19.58WIuL8H_fQGEPmUG7U61FUHtAmsHXanYtQFSgqni6U
//! ```

use std::ffi::OsString;
use std::fs::{self, Permissions};
use std::os::unix::fs::PermissionsExt;
use std::path::Path;

use tempfile::{Builder as TempfileBuilder, TempDir};

const WRAPPER_TEMPLATE: &str = include_str!("nix-wrapper.sh");

/// A shadow Nix store.
///
/// After creation, wrappers of common Nix executables will be
/// available under `bin`, allowing you to easily interact with
/// the shadow store.
pub struct ShadowStore {
    store_root: TempDir,
}

impl ShadowStore {
    pub fn new() -> Self {
        let store_root = TempfileBuilder::new()
            .prefix("shadow-store-")
            .tempdir()
            .expect("failed to create temporary root");

        fs::create_dir_all(store_root.path().join("etc/nix"))
            .expect("failed to create temporary config dir");

        fs::create_dir_all(store_root.path().join("bin"))
            .expect("failed to create temporary wrapper dir");

        let store = Self { store_root };
        store.create_wrapper("nix-store");

        store
    }

    /// Returns the path to the store root.
    pub fn path(&self) -> &Path {
        self.store_root.path()
    }

    /// Returns the path to the `nix-store` wrapper.
    pub fn nix_store_cmd(&self) -> OsString {
        self.store_root
            .path()
            .join("bin/nix-store")
            .as_os_str()
            .to_owned()
    }

    /// Creates a wrapper script for a Nix command.
    fn create_wrapper(&self, command: &str) {
        let path = self.store_root.path().join("bin").join(command);
        let permissions = Permissions::from_mode(0o755);
        let wrapper = WRAPPER_TEMPLATE
            .replace("%command%", command)
            .replace("%store_root%", &self.store_root.path().to_string_lossy());

        fs::write(&path, wrapper).expect("failed to write wrapper script");

        fs::set_permissions(&path, permissions).expect("failed to set wrapper permissions");
    }
}

impl Drop for ShadowStore {
    fn drop(&mut self) {
        // recursively set write permissions on directories so we can
        // cleanly delete the entire store

        fn walk(dir: &Path) {
            // excuse the unwraps
            let metadata = fs::metadata(dir).unwrap();
            let mut permissions = metadata.permissions();
            permissions.set_mode(permissions.mode() | 0o200);
            fs::set_permissions(dir, permissions).unwrap();

            for entry in fs::read_dir(dir).unwrap() {
                let entry = entry.unwrap();

                if entry.file_type().unwrap().is_dir() {
                    walk(&entry.path());
                }
            }
        }

        walk(self.store_root.path());
    }
}
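A minimal sketch of driving vanilla Nix against a shadow store from a test, using only the `ShadowStore` API above (the store path argument is illustrative):

```rust
use std::process::Command;

use attic::testing::shadow_store::ShadowStore;

fn realise_in_shadow_store(store_path: &str) {
    let store = ShadowStore::new();

    // The generated wrapper exports NIX_CONF_DIR etc. and passes
    // --store for us, so this behaves like the manual example above.
    let status = Command::new(store.nix_store_cmd())
        .args(["--realise", store_path])
        .status()
        .expect("failed to run the nix-store wrapper");
    assert!(status.success());

    // Dropping `store` restores write permissions and deletes the
    // temporary root.
}
```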
6 attic/src/testing/shadow_store/nix-wrapper.sh Normal file
@@ -0,0 +1,6 @@
#!/usr/bin/env bash
export NIX_CONF_DIR="%store_root%/etc/nix"
export NIX_USER_CONF_FILES=""
export NIX_REMOTE=""

exec %command% --store "%store_root%" "$@"
39 attic/src/util.rs Normal file
@@ -0,0 +1,39 @@
//! Misc utilities.

use std::future::Future;
use std::mem;

use tokio::task;

/// Runs a future when dropped.
///
/// This is used to clean up external resources that are
/// difficult to correctly model using ownership.
pub struct Finally<F: Future + Send + 'static>
where
    F::Output: Send + 'static,
{
    f: Option<F>,
}

impl<F: Future + Send + 'static> Finally<F>
where
    F::Output: Send + 'static,
{
    pub fn new(f: F) -> Self {
        Self { f: Some(f) }
    }

    pub fn cancel(self) {
        mem::forget(self);
    }
}

impl<F: Future + Send + 'static> Drop for Finally<F>
where
    F::Output: Send + 'static,
{
    fn drop(&mut self) {
        task::spawn(self.f.take().unwrap());
    }
}
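A minimal sketch of how `Finally` is meant to be used; the upload helpers below are hypothetical stubs, not Attic APIs:

```rust
use attic::util::Finally;

async fn send_parts() -> Result<(), ()> { Ok(()) } // hypothetical stub
async fn abort_upload() {}                         // hypothetical stub

async fn upload() -> Result<(), ()> {
    // If `?` bails out early below, dropping `cleanup` spawns
    // `abort_upload` on the Tokio runtime.
    let cleanup = Finally::new(abort_upload());

    send_parts().await?;

    // Success: forget the guard so the cleanup never runs.
    cleanup.cancel();
    Ok(())
}
```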
1 book/.gitignore vendored Normal file
@@ -0,0 +1 @@
book
10 book/book.toml Normal file
@@ -0,0 +1,10 @@
[book]
authors = ["Zhaofeng Li"]
language = "en"
multilingual = false
src = "src"
title = "Attic"

[output.html]
git-repository-url = "https://github.com/zhaofengli/attic"
edit-url-template = "https://github.com/zhaofengli/attic/edit/main/book/{path}"
43 book/colorized-help.nix Normal file
@@ -0,0 +1,43 @@
{ lib, stdenv, runCommand, attic, ansi2html }:

with builtins;

let
  commands = {
    attic = [
      null
      "login"
      "use"
      "push"
      "cache"
      "cache create"
      "cache configure"
      "cache destroy"
      "cache info"
    ];
    atticd = [
      null
    ];
    atticadm = [
      null
      "make-token"
    ];
  };
  renderMarkdown = name: subcommands: ''
    mkdir -p $out
    (
      ansi2html -H
      ${lib.concatMapStrings (subcommand: let
        fullCommand = "${name} ${if subcommand == null then "" else subcommand}";
      in "${renderCommand fullCommand}\n") subcommands}
    ) >>$out/${name}.md
  '';
  renderCommand = fullCommand: ''
    echo '## `${fullCommand}`'
    echo -n '<pre><div class="hljs">'
    TERM=xterm-256color CLICOLOR_FORCE=1 ${fullCommand} --help | ansi2html -p
    echo '</div></pre>'
  '';
in runCommand "attic-colorized-help" {
  nativeBuildInputs = [ attic ansi2html ];
} (concatStringsSep "\n" (lib.mapAttrsToList renderMarkdown commands))
40 book/default.nix Normal file
@@ -0,0 +1,40 @@
{ lib, stdenv, nix-gitignore, mdbook, mdbook-linkcheck, python3, callPackage, writeScript
, attic ? null
}:

let
  colorizedHelp = let
    help = callPackage ./colorized-help.nix {
      inherit attic;
    };
  in if attic != null then help else null;
in stdenv.mkDerivation {
  inherit colorizedHelp;

  name = "attic-book";

  src = nix-gitignore.gitignoreSource [] ./.;

  nativeBuildInputs = [ mdbook ];

  buildPhase = ''
    emitColorizedHelp() {
      command=$1

      if [[ -n "$colorizedHelp" ]]; then
        cat "$colorizedHelp/$command.md" >> src/reference/$command-cli.md
      else
        echo "Error: No attic executable passed to the builder" >> src/reference/$command-cli.md
      fi
    }

    emitColorizedHelp attic
    emitColorizedHelp atticd
    emitColorizedHelp atticadm

    mdbook build -d ./build
    cp -r ./build $out
  '';

  installPhase = "true";
}
|
11
book/src/SUMMARY.md
Normal file
11
book/src/SUMMARY.md
Normal file
|
@ -0,0 +1,11 @@
|
||||||
|
# Summary
|
||||||
|
|
||||||
|
- [Introduction](./introduction.md)
|
||||||
|
- [Tutorial](./tutorial.md)
|
||||||
|
- [User Guide](./user-guide/README.md)
|
||||||
|
- [Admin Guide](./admin-guide/README.md)
|
||||||
|
- [FAQs](./faqs.md)
|
||||||
|
- [Reference](./reference/README.md)
|
||||||
|
- [attic](./reference/attic-cli.md)
|
||||||
|
- [atticd](./reference/atticd-cli.md)
|
||||||
|
- [atticadm](./reference/atticadm-cli.md)
|
3 book/src/admin-guide/README.md Normal file
@@ -0,0 +1,3 @@
# Admin Guide

> This section is under construction.
37 book/src/faqs.md Normal file
@@ -0,0 +1,37 @@
# FAQs

## Does it replace [Cachix](https://www.cachix.org)?

No, it does not.
Cachix is an awesome product and the direct inspiration for the user experience of Attic.
It works at a much larger scale than Attic and is a proven solution.
Numerous open-source projects in the Nix community (including mine!) use Cachix to share publicly-available binaries.

Attic can be thought of as providing a similar user experience at a much smaller scale (personal or team use).

## What happens if a user uploads a path that is already in the global cache?

The user will still fully upload the path to the server because they have to prove possession of the file.
The difference is that instead of having the upload streamed to the storage backend (e.g., S3), it's only run through a hash function and discarded.
Once the NAR hash is confirmed, a mapping is created to grant the local cache access to the global NAR.
The global deduplication behavior is transparent to the client.

In the future, schemes to prove data possession without fully uploading the file may be supported.

## What happens if a user uploads a path with incorrect/malicious metadata?

They will only pollute their own cache.
Path metadata (store path, references, deriver, etc.) is associated with the local cache, and the global cache only contains content-addressed NARs that are "context-free."

## How is authentication handled?

Authentication is done via signed JWTs containing the allowed permissions.
Each instance of `atticd --mode api-server` is stateless.
This design may be revisited later, with the option of a more stateful method of authentication.

## On what granularity is deduplication done?

Currently, global deduplication is done at the level of NAR files.
File- or chunk-level deduplication (e.g., casync) may be added later.
It remains to be seen how NAR reassembly can be done in a user-friendly yet economical manner.
On compute services, outbound traffic often isn't free, while several S3-compatible storage services provide free egress (e.g., [Cloudflare R2](https://developers.cloudflare.com/r2/platform/pricing/)).
23 book/src/introduction.md Normal file
@@ -0,0 +1,23 @@
# Introduction

**Attic** is a self-hostable Nix Binary Cache server backed by an S3-compatible storage provider.
It has support for global deduplication and garbage collection.

Attic is still an early prototype and is looking for more testers. Want to jump in? [Start your own Attic server](./tutorial.md) in 15 minutes.

```
⚙️ Pushing 5 paths to "demo" on "local" (566 already cached, 2001 in upstream)...
✅ gnvi1x7r8kl3clzx0d266wi82fgyzidv-steam-run-fhs (29.69 MiB/s)
✅ rw7bx7ak2p02ljm3z4hhpkjlr8rzg6xz-steam-fhs (30.56 MiB/s)
✅ y92f9y7qhkpcvrqhzvf6k40j6iaxddq8-0p36ammvgyr55q9w75845kw4fw1c65ln-source (19.96 MiB/s)
🕒 vscode-1.74.2 ███████████████████████████████████████ 345.66 MiB (41.32 MiB/s)
🕓 zoom-5.12.9.367 ███████████████████████████ 329.36 MiB (39.47 MiB/s)
```

## Goals

- **Multi-Tenancy**: Create a private cache for yourself, and one for friends and co-workers. Tenants are mutually untrusting and cannot pollute the views of other caches.
- **Global Deduplication**: Individual caches (tenants) are simply restricted views of the content-addressed global cache. When paths are uploaded, a mapping is created to grant the local cache access to the global NAR.
- **Managed Signing**: Signing is done on-the-fly by the server when store paths are fetched. The user pushing store paths does not have access to the signing key.
- **High Availability**: Attic can be easily replicated. It's designed to be deployed to serverless platforms like fly.io but also works nicely in a single-machine setup.
- **Garbage Collection**: Unused store paths can be garbage-collected in an LRU manner.
7 book/src/reference/README.md Normal file
@@ -0,0 +1,7 @@
# Reference

This section contains detailed listings of options and parameters accepted by Attic:

- [`attic` CLI](./attic-cli.md)
- [`atticd` CLI](./atticd-cli.md)
- [`atticadm` CLI](./atticadm-cli.md)
12 book/src/reference/attic-cli.md Normal file
@@ -0,0 +1,12 @@
# `attic` CLI

The following are the help messages that will be printed when you invoke any sub-command with `--help`:

<!--
The following is injected by the build system

Looking to improve the help messages? They are located in:

- client/src/cli.rs
- client/src/command/<subcommand>.rs
-->
11 book/src/reference/atticadm-cli.md Normal file
@@ -0,0 +1,11 @@
# `atticadm` CLI

The following are the help messages that will be printed when you invoke any sub-command with `--help`:

<!--
The following is injected by the build system

Looking to improve the help messages? They are located in:

- server/src/adm/main.rs
-->
11 book/src/reference/atticd-cli.md Normal file
@@ -0,0 +1,11 @@
# `atticd` CLI

The following are the help messages that will be printed when you invoke any sub-command with `--help`:

<!--
The following is injected by the build system

Looking to improve the help messages? They are located in:

- server/src/cli.rs
-->
204 book/src/tutorial.md Normal file
@@ -0,0 +1,204 @@
# Tutorial

Let's spin up Attic in just 15 minutes (yes, it works on macOS too!):

```bash
nix-shell https://github.com/zhaofengli/attic/tarball/main -A demo
```

Simply run `atticd` to start the server in monolithic mode with a SQLite database and local storage:

```console
$ atticd
Attic Server 0.1.0 (release)

-----------------
Welcome to Attic!

A simple setup using SQLite and local storage has been configured for you in:

    /home/zhaofeng/.config/attic/server.toml

Run the following command to log into this server:

    attic login local http://localhost:8080 eyJ...

Documentations and guides:

    https://docs.attic.rs

Enjoy!
-----------------

Running migrations...
Starting API server...
Listening on [::]:8080...
```

## Cache Creation

`atticd` is the server, and `attic` is the client.
We can now log in and create a cache:

```console
# Copy and paste from the atticd output
$ attic login local http://localhost:8080 eyJ...
✍️ Configuring server "local"

$ attic cache create hello
✨ Created cache "hello" on "local"
```

## Pushing

Let's push `attic` itself to the cache:

```console
$ attic push hello $(which attic)
⚙️ Pushing 1 paths to "hello" on "local" (0 already cached, 45 in upstream)...
✅ r5d7217c0rjd5iiz1g2nhvd15frck9x2-attic-0.1.0 (52.89 MiB/s)
```

The interesting thing is that `attic` automatically skipped over store paths cached by `cache.nixos.org`!
This behavior can be configured on a per-cache basis.

Note that Attic performs content-addressed global deduplication, so when you upload the same store path to another cache, the underlying NAR is only stored once.
Each cache is essentially a restricted view of the global cache.

## Pulling

Now, let's pull it back from the cache.
For demonstration purposes, let's use `--store` to make Nix download to another directory because Attic already exists in `/nix/store`:

```console
# Automatically configures ~/.config/nix/nix.conf for you
$ attic use hello
Configuring Nix to use "hello" on "local":
+ Substituter: http://localhost:8080/hello
+ Trusted Public Key: hello:vlsd7ZHIXNnKXEQShVnd7erE8zcuSKrBWRpV6zTibnA=
+ Access Token

$ nix-store --store $PWD/nix-demo -r $(which attic)
[snip]
copying path '/nix/store/r5d7217c0rjd5iiz1g2nhvd15frck9x2-attic-0.1.0' from 'http://localhost:8080/hello'...
warning: you did not specify '--add-root'; the result might be removed by the garbage collector
/nix/store/r5d7217c0rjd5iiz1g2nhvd15frck9x2-attic-0.1.0

$ ls nix-demo/nix/store/r5d7217c0rjd5iiz1g2nhvd15frck9x2-attic-0.1.0/bin/attic
nix-demo/nix/store/r5d7217c0rjd5iiz1g2nhvd15frck9x2-attic-0.1.0/bin/attic
```

Note that to pull into the actual Nix Store, your user must be considered [trusted](https://nixos.org/manual/nix/stable/command-ref/conf-file.html#conf-trusted-users) by the `nix-daemon`.

## Access Control

Attic performs stateless authentication using signed JWT tokens which contain permissions.
The root token printed out by `atticd` is all-powerful and should not be shared.

Let's create another token that can only access the `hello` cache:

```console
$ atticadm make-token --sub alice --validity '3 months' --pull hello --push hello
eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJhbGljZSIsImV4cCI6MTY4MDI5MzMzOSwiaHR0cHM6Ly9qd3QuYXR0aWMucnMvdjEiOnsiY2FjaGVzIjp7ImhlbGxvIjp7InIiOjEsInciOjF9fX19.XJsaVfjrX5l7p9z76836KXP6Vixn41QJUfxjiK7D-LM
```

Let's say Alice wants to have her own caches.
Instead of creating caches for her, we can let her do it herself:

```console
$ atticadm make-token --sub alice --validity '3 months' --pull 'alice-*' --push 'alice-*' --create-cache 'alice-*'
eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJhbGljZSIsImV4cCI6MTY4MDI5MzQyNSwiaHR0cHM6Ly9qd3QuYXR0aWMucnMvdjEiOnsiY2FjaGVzIjp7ImFsaWNlLSoiOnsiciI6MSwidyI6MSwiY2MiOjF9fX19.MkSnK6yGDWYUVnYiJF3tQgdTlqstfWlbziFWUr-lKUk
```

Now Alice can use this token to _create_ any cache beginning with `alice-` and push to them.
Try passing `--dump-claims` to show the JWT claims without encoding the token to see what's going on.
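For reference, the payload of the second token above decodes to roughly the following claims (whitespace added; `r`, `w` and `cc` presumably carry the `--pull`, `--push` and `--create-cache` permissions):

```json
{
  "sub": "alice",
  "exp": 1680293425,
  "https://jwt.attic.rs/v1": {
    "caches": {
      "alice-*": { "r": 1, "w": 1, "cc": 1 }
    }
  }
}
```
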
## Going Public

Let's make the cache public. Making it public gives unauthenticated users pull access:

```console
$ attic cache configure hello --public
✅ Configured "hello" on "local"

# Now we can query the cache without being authenticated
$ curl http://localhost:8080/hello/nix-cache-info
WantMassQuery: 1
StoreDir: /nix/store
Priority: 41
```

## Garbage Collection

It's a bad idea to let binary caches grow unbounded.
Let's configure garbage collection on the cache to automatically delete objects that haven't been accessed in a while:

```
$ attic cache configure hello --retention-period '1s'
✅ Configured "hello" on "local"
```

Now the retention period is only one second.
Instead of waiting for the periodic garbage collection to occur (see `server.toml`), let's trigger it manually:

```bash
atticd --mode garbage-collector-once
```

Now the store path doesn't exist on the cache anymore!

```console
$ nix-store --store $PWD/nix-demo-2 -r $(which attic)
don't know how to build these paths:
/nix/store/v660wl07i1lcrrgpr1yspn2va5d1xgjr-attic-0.1.0
error: build of '/nix/store/v660wl07i1lcrrgpr1yspn2va5d1xgjr-attic-0.1.0' failed

$ curl http://localhost:8080/hello/v660wl07i1lcrrgpr1yspn2va5d1xgjr.narinfo
{"code":404,"error":"NoSuchObject","message":"The requested object does not exist."}
```

Let's reset it back to the default, which is to not garbage collect (configure it in `server.toml`):

```console
$ attic cache configure hello --reset-retention-period
✅ Configured "hello" on "local"

$ attic cache info hello
Public: true
Public Key: hello:vlsd7ZHIXNnKXEQShVnd7erE8zcuSKrBWRpV6zTibnA=
Binary Cache Endpoint: http://localhost:8080/hello
API Endpoint: http://localhost:8080/
Store Directory: /nix/store
Priority: 41
Upstream Cache Keys: ["cache.nixos.org-1"]
Retention Period: Global Default
```

Because of Attic's global deduplication, garbage collection actually happens on two levels:

1. **Local Cache**: When an object is garbage collected, only the mapping between the metadata in the local cache and the NAR in the global cache gets deleted. The local cache loses access to the NAR, but the storage isn't freed.
2. **Global Cache**: Orphan NARs not referenced by any local cache then become eligible for deletion. This time the storage space is actually freed and subsequent uploads of the same NAR will actually trigger an upload to the storage backend.

## Summary

In just a few commands, we have:

1. Set up a new Attic server and a binary cache
2. Pushed store paths to it
3. Configured Nix to use the new binary cache
4. Generated access tokens that provide restricted access
5. Made the cache public
6. Performed garbage collection

## What's next

> Note: Attic is an early prototype and everything is subject to change! It may be full of holes and APIs may be changed without backward-compatibility. You might even be required to reset the entire database. I would love to have people give it a try, but please keep that in mind :)

For a less temporary setup, you can set up `atticd` with PostgreSQL and S3.
You should also place it behind a load balancer like NGINX to provide HTTPS.
Take a look at `~/.config/attic/server.toml` to see what you can configure!

While it's easy to get started by running `atticd` in monolithic mode, for production use it's best to run different components of `atticd` separately with `--mode`:

- `worker`: Stateless and can be replicated.
- `garbage-collector`: Performs periodic garbage collection. Cannot be replicated.
40 book/src/user-guide/README.md Normal file
@@ -0,0 +1,40 @@
# User Guide

## Logging in

You should have received an `attic login` command from an admin like the following:

```
attic login central https://attic.domain.tld/ eyJ...
```

The `attic` client can work with multiple servers at the same time.
To select the `foo` cache from server `central`, use one of the following:

- `foo`, if the `central` server is configured as the default
- `central:foo`

To configure the default server, set `default-server` in `~/.config/attic/config.toml`.
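For example (a sketch; only the `default-server` key comes from the text above, and the surrounding file layout may differ):

```toml
# ~/.config/attic/config.toml
default-server = "central"
```
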
## Enabling a cache

To configure Nix to automatically use cache `foo`:

```
attic use foo
```

## Pushing to the cache

To push a store path to cache `foo`:

```bash
attic push foo /nix/store/...
```

Other examples include:

```bash
attic push foo ./result
attic push foo /run/current-system
```
39 client/Cargo.toml Normal file
@@ -0,0 +1,39 @@
[package]
name = "attic-client"
version = "0.1.0"
edition = "2021"
publish = false

[[bin]]
name = "attic"
path = "src/main.rs"

[dependencies]
attic = { path = "../attic" }

anyhow = "1.0.68"
bytes = "1.3.0"
clap = { version = "4.0", features = ["derive"] }
clap_complete = "4.0.2"
const_format = "0.2.30"
dialoguer = "0.10.2"
displaydoc = "0.2.3"
enum-as-inner = "0.5.1"
futures = "0.3.25"
humantime = "2.1.0"
indicatif = "0.17.2"
lazy_static = "1.4.0"
regex = "1.7.0"
reqwest = { version = "0.11.13", default-features = false, features = ["json", "rustls-tls", "stream"] }
serde = { version = "1.0.151", features = ["derive"] }
serde_json = "1.0.91"
toml = "0.5.10"
tracing = "0.1.37"
tracing-subscriber = "0.3.16"
xdg = "2.4.1"

[dependencies.tokio]
version = "1.23.0"
features = [
    "full"
]
219 client/src/api/mod.rs Normal file
@@ -0,0 +1,219 @@
use std::error::Error as StdError;
use std::fmt;

use anyhow::Result;
use bytes::Bytes;
use const_format::concatcp;
use displaydoc::Display;
use futures::TryStream;
use reqwest::{
    header::{HeaderMap, HeaderValue, AUTHORIZATION, USER_AGENT},
    Body, Client as HttpClient, Response, StatusCode, Url,
};
use serde::Deserialize;

use crate::config::ServerConfig;
use crate::version::ATTIC_DISTRIBUTOR;
use attic::api::v1::cache_config::{CacheConfig, CreateCacheRequest};
use attic::api::v1::get_missing_paths::{GetMissingPathsRequest, GetMissingPathsResponse};
use attic::api::v1::upload_path::UploadPathNarInfo;
use attic::cache::CacheName;
use attic::nix_store::StorePathHash;

/// The User-Agent string of Attic.
// Note: `concatcp!` concatenates constants and does not interpret
// `format!`-style placeholders, so the pieces are spliced in directly.
const ATTIC_USER_AGENT: &str =
    concatcp!("Attic/", env!("CARGO_PKG_VERSION"), " (", ATTIC_DISTRIBUTOR, ")");

/// The Attic API client.
#[derive(Debug, Clone)]
pub struct ApiClient {
    /// Base endpoint of the server.
    endpoint: Url,

    /// An initialized HTTP client.
    client: HttpClient,
}

/// An API error.
#[derive(Debug, Display)]
pub enum ApiError {
    /// {0}
    Structured(StructuredApiError),

    /// HTTP {0}: {1}
    Unstructured(StatusCode, String),
}

#[derive(Debug, Clone, Deserialize)]
pub struct StructuredApiError {
    #[allow(dead_code)]
    code: u16,
    error: String,
    message: String,
}

impl ApiClient {
    pub fn from_server_config(config: ServerConfig) -> Result<Self> {
        let client = build_http_client(config.token.as_deref());

        Ok(Self {
            endpoint: Url::parse(&config.endpoint)?,
            client,
        })
    }

    /// Returns the configuration of a cache.
    pub async fn get_cache_config(&self, cache: &CacheName) -> Result<CacheConfig> {
        let endpoint = self
            .endpoint
            .join("_api/v1/cache-config/")?
            .join(cache.as_str())?;

        let res = self.client.get(endpoint).send().await?;

        if res.status().is_success() {
            let cache_config = res.json().await?;
            Ok(cache_config)
        } else {
            let api_error = ApiError::try_from_response(res).await?;
            Err(api_error.into())
        }
    }

    /// Creates a cache.
    pub async fn create_cache(&self, cache: &CacheName, request: CreateCacheRequest) -> Result<()> {
        let endpoint = self
            .endpoint
            .join("_api/v1/cache-config/")?
            .join(cache.as_str())?;

        let res = self.client.post(endpoint).json(&request).send().await?;

        if res.status().is_success() {
            Ok(())
        } else {
            let api_error = ApiError::try_from_response(res).await?;
            Err(api_error.into())
        }
    }

    /// Configures a cache.
    pub async fn configure_cache(&self, cache: &CacheName, config: &CacheConfig) -> Result<()> {
        let endpoint = self
            .endpoint
            .join("_api/v1/cache-config/")?
            .join(cache.as_str())?;

        let res = self.client.patch(endpoint).json(&config).send().await?;

        if res.status().is_success() {
            Ok(())
        } else {
            let api_error = ApiError::try_from_response(res).await?;
            Err(api_error.into())
        }
    }

    /// Destroys a cache.
    pub async fn destroy_cache(&self, cache: &CacheName) -> Result<()> {
        let endpoint = self
            .endpoint
            .join("_api/v1/cache-config/")?
            .join(cache.as_str())?;

        let res = self.client.delete(endpoint).send().await?;

        if res.status().is_success() {
            Ok(())
        } else {
            let api_error = ApiError::try_from_response(res).await?;
            Err(api_error.into())
        }
    }

    /// Returns paths missing from a cache.
    pub async fn get_missing_paths(
        &self,
        cache: &CacheName,
        store_path_hashes: Vec<StorePathHash>,
    ) -> Result<GetMissingPathsResponse> {
        let endpoint = self.endpoint.join("_api/v1/get-missing-paths")?;
        let payload = GetMissingPathsRequest {
            cache: cache.to_owned(),
            store_path_hashes,
        };

        let res = self.client.post(endpoint).json(&payload).send().await?;

        if res.status().is_success() {
            let cache_config = res.json().await?;
            Ok(cache_config)
        } else {
            let api_error = ApiError::try_from_response(res).await?;
            Err(api_error.into())
        }
    }

    /// Uploads a path.
    pub async fn upload_path<S>(&self, nar_info: UploadPathNarInfo, stream: S) -> Result<()>
    where
        S: TryStream + Send + Sync + 'static,
        S::Error: Into<Box<dyn StdError + Send + Sync>>,
        Bytes: From<S::Ok>,
    {
        let endpoint = self.endpoint.join("_api/v1/upload-path")?;
        let upload_info_json = serde_json::to_string(&nar_info)?;

        let res = self
            .client
            .put(endpoint)
            .header(
                "X-Attic-Nar-Info",
                HeaderValue::from_str(&upload_info_json)?,
            )
            .header(USER_AGENT, HeaderValue::from_str(ATTIC_USER_AGENT)?)
            .body(Body::wrap_stream(stream))
            .send()
            .await?;

        if res.status().is_success() {
            Ok(())
        } else {
            let api_error = ApiError::try_from_response(res).await?;
            Err(api_error.into())
        }
    }
}

impl StdError for ApiError {}

impl ApiError {
    async fn try_from_response(response: Response) -> Result<Self> {
        let status = response.status();
        let text = response.text().await?;
        match serde_json::from_str(&text) {
            Ok(s) => Ok(Self::Structured(s)),
            Err(_) => Ok(Self::Unstructured(status, text)),
        }
    }
}

impl fmt::Display for StructuredApiError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}: {}", self.error, self.message)
    }
}

fn build_http_client(token: Option<&str>) -> HttpClient {
    let mut headers = HeaderMap::new();

    if let Some(token) = token {
        let auth_header = HeaderValue::from_str(&format!("bearer {}", token)).unwrap();
        headers.insert(AUTHORIZATION, auth_header);
    }

    reqwest::Client::builder()
        .default_headers(headers)
        .build()
        .unwrap()
}
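As a hedged sketch of how `ApiClient` might be used from within this crate (the cache name is illustrative, and how the `ServerConfig` is obtained is left out):

```rust
use crate::api::ApiClient;
use crate::config::ServerConfig;
use attic::cache::CacheName;

async fn print_cache_priority(server: ServerConfig) -> anyhow::Result<()> {
    let api = ApiClient::from_server_config(server)?;

    let cache = CacheName::new("hello".to_string())?;
    let config = api.get_cache_config(&cache).await?;

    // CacheConfig is a patch-style struct, so fields are optional.
    println!("priority: {:?}", config.priority);
    Ok(())
}
```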
77 client/src/cache.rs Normal file
@@ -0,0 +1,77 @@
//! Client-specific cache references.
//!
//! The Attic client is designed to work with multiple servers.
//! Therefore, users can refer to caches in the following forms:
//!
//! - `cachename`: Will use `cachename` on the default server
//! - `servername:cachename`: Will use `cachename` on server `servername`
//! - `https://cache.server.tld/username`: Will auto-detect
//!   - To be implemented

use std::ops::Deref;
use std::str::FromStr;

use anyhow::{anyhow, Result};
use serde::{Deserialize, Serialize};

pub use attic::cache::{CacheName, CacheNamePattern};

/// A reference to a cache.
#[derive(Debug, Clone)]
pub enum CacheRef {
    DefaultServer(CacheName),
    ServerQualified(ServerName, CacheName),
}

/// A server name.
///
/// It has the same requirements as a cache name.
#[derive(Debug, Clone, Hash, PartialEq, Eq, Deserialize, Serialize)]
#[serde(transparent)]
pub struct ServerName(CacheName);

impl CacheRef {
    fn try_parse_cache(s: &str) -> Option<Self> {
        let name = CacheName::new(s.to_owned()).ok()?;
        Some(Self::DefaultServer(name))
    }

    fn try_parse_server_qualified(s: &str) -> Option<Self> {
        let (server, cache) = s.split_once(':')?;
        let server = CacheName::new(server.to_owned()).ok()?;
        let cache = CacheName::new(cache.to_owned()).ok()?;
        Some(Self::ServerQualified(ServerName(server), cache))
    }
}

impl FromStr for CacheRef {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> Result<Self> {
        if let Some(r) = Self::try_parse_cache(s) {
            return Ok(r);
        }

        if let Some(r) = Self::try_parse_server_qualified(s) {
            return Ok(r);
        }

        Err(anyhow!("Invalid cache reference"))
    }
}

impl Deref for ServerName {
    type Target = CacheName;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl FromStr for ServerName {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> Result<Self> {
        Ok(Self(CacheName::from_str(s)?))
    }
}
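A short sketch of the reference grammar documented above, as seen from inside the client crate:

```rust
use std::str::FromStr;

use crate::cache::CacheRef;

fn parse_examples() -> anyhow::Result<()> {
    // `cachename` on the default server:
    let plain = CacheRef::from_str("foo")?;
    assert!(matches!(plain, CacheRef::DefaultServer(_)));

    // `servername:cachename`:
    let qualified = CacheRef::from_str("central:foo")?;
    assert!(matches!(qualified, CacheRef::ServerQualified(_, _)));

    Ok(())
}
```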
69 client/src/cli.rs Normal file
@@ -0,0 +1,69 @@
//! Global CLI Setup.

use std::env;

use anyhow::{anyhow, Result};
use clap::{CommandFactory, Parser, Subcommand};
use clap_complete::Shell;
use enum_as_inner::EnumAsInner;

use crate::command::cache::{self, Cache};
use crate::command::get_closure::{self, GetClosure};
use crate::command::login::{self, Login};
use crate::command::push::{self, Push};
use crate::command::r#use::{self, Use};

/// Attic binary cache client.
#[derive(Debug, Parser)]
#[clap(version)]
#[clap(propagate_version = true)]
pub struct Opts {
    #[clap(subcommand)]
    pub command: Command,
}

#[derive(Debug, Subcommand, EnumAsInner)]
pub enum Command {
    Login(Login),
    Use(Use),
    Push(Push),
    Cache(Cache),

    #[clap(hide = true)]
    GetClosure(GetClosure),
}

/// Generate shell autocompletion files.
#[derive(Debug, Parser)]
pub struct GenCompletions {
    /// The shell to generate autocompletion files for.
    shell: Shell,
}

pub async fn run() -> Result<()> {
    // https://github.com/clap-rs/clap/issues/1335
    if let Some("gen-completions") = env::args().nth(1).as_deref() {
        return gen_completions(env::args().nth(2)).await;
    }

    let opts = Opts::parse();

    match opts.command {
        Command::Login(_) => login::run(opts).await,
        Command::Use(_) => r#use::run(opts).await,
        Command::Push(_) => push::run(opts).await,
        Command::Cache(_) => cache::run(opts).await,
        Command::GetClosure(_) => get_closure::run(opts).await,
    }
}

async fn gen_completions(shell: Option<String>) -> Result<()> {
    let shell: Shell = shell
        .ok_or_else(|| anyhow!("Must specify a shell."))?
        .parse()
        .unwrap();

    clap_complete::generate(shell, &mut Opts::command(), "attic", &mut std::io::stdout());

    Ok(())
}
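Since `gen-completions` is intercepted before regular argument parsing (see `run` above), it can be invoked like any other subcommand; a sketch, with an illustrative output path:

```console
$ attic gen-completions bash > attic.bash
```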
330
client/src/command/cache.rs
Normal file
330
client/src/command/cache.rs
Normal file
|
@ -0,0 +1,330 @@
|
||||||
|
use anyhow::{anyhow, Result};
|
||||||
|
use clap::{Parser, Subcommand};
|
||||||
|
use dialoguer::Input;
|
||||||
|
use humantime::Duration;
|
||||||
|
|
||||||
|
use crate::api::ApiClient;
|
||||||
|
use crate::cache::CacheRef;
|
||||||
|
use crate::cli::Opts;
|
||||||
|
use crate::config::Config;
|
||||||
|
use attic::api::v1::cache_config::{
|
||||||
|
CacheConfig, CreateCacheRequest, KeypairConfig, RetentionPeriodConfig,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Manage caches on an Attic server.
|
||||||
|
#[derive(Debug, Parser)]
|
||||||
|
pub struct Cache {
|
||||||
|
#[clap(subcommand)]
|
||||||
|
command: Command,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Subcommand)]
|
||||||
|
enum Command {
|
||||||
|
Create(Create),
|
||||||
|
Configure(Configure),
|
||||||
|
Destroy(Destroy),
|
||||||
|
Info(Info),
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create a cache.
|
||||||
|
///
|
||||||
|
/// You need the `create_cache` permission on the cache that
|
||||||
|
/// you are creating.
|
||||||
|
#[derive(Debug, Clone, Parser)]
|
||||||
|
struct Create {
|
||||||
|
/// Name of the cache to create.
|
||||||
|
cache: CacheRef,
|
||||||
|
|
||||||
|
/// Make the cache public.
|
||||||
|
///
|
||||||
|
/// Public caches can be pulled from by anyone without
|
||||||
|
/// a token. Only those with the `push` permission can push.
|
||||||
|
///
|
||||||
|
/// By default, caches are private.
|
||||||
|
#[clap(long)]
|
||||||
|
public: bool,
|
||||||
|
|
||||||
|
/// The Nix store path this binary cache uses.
|
||||||
|
///
|
||||||
|
/// You probably don't want to change this. Changing
|
||||||
|
/// this can make your cache unusable.
|
||||||
|
#[clap(long, hide = true, default_value = "/nix/store")]
|
||||||
|
store_dir: String,
|
||||||
|
|
||||||
|
/// The priority of the binary cache.
|
||||||
|
///
|
||||||
|
/// A lower number denotes a higher priority.
|
||||||
|
/// <https://cache.nixos.org> has a priority of 40.
|
||||||
|
#[clap(long, default_value = "41")]
|
||||||
|
priority: i32,
|
||||||
|
|
||||||
|
/// The signing key name of an upstream cache.
|
||||||
|
///
|
||||||
|
/// When pushing to the cache, paths signed with this key
|
||||||
|
/// will be skipped by default. Specify this flag multiple
|
||||||
|
/// times to add multiple key names.
|
||||||
|
#[clap(
|
||||||
|
name = "NAME",
|
||||||
|
long = "upstream-cache-key-name",
|
||||||
|
default_value = "cache.nixos.org-1"
|
||||||
|
)]
|
||||||
|
upstream_cache_key_names: Vec<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Configure a cache.
|
||||||
|
///
|
||||||
|
/// You need the `configure_cache` permission on the cache that
|
||||||
|
/// you are configuring.
|
||||||
|
#[derive(Debug, Clone, Parser)]
|
||||||
|
struct Configure {
|
||||||
|
/// Name of the cache to configure.
|
||||||
|
cache: CacheRef,
|
||||||
|
|
||||||
|
/// Regenerate the signing keypair.
|
||||||
|
///
|
||||||
|
/// The server-side signing key will be regenerated and
|
||||||
|
/// all users will need to configure the new signing key
|
||||||
|
/// in `nix.conf`.
|
||||||
|
#[clap(long)]
|
||||||
|
regenerate_keypair: bool,
|
||||||
|
|
||||||
|
/// Make the cache public.
|
||||||
|
///
|
||||||
|
/// Use `--private` to make it private.
|
||||||
|
#[clap(long)]
|
||||||
|
public: bool,
|
||||||
|
|
||||||
|
/// Make the cache private.
|
||||||
|
///
|
||||||
|
/// Use `--public` to make it public.
|
||||||
|
#[clap(long)]
|
||||||
|
private: bool,
|
||||||
|
|
||||||
|
/// The Nix store path this binary cache uses.
|
||||||
|
///
|
||||||
|
/// You probably don't want to change this. Changing
|
||||||
|
/// this can make your cache unusable.
|
||||||
|
#[clap(long, hide = true)]
|
||||||
|
store_dir: Option<String>,
|
||||||
|
|
||||||
|
/// The priority of the binary cache.
|
||||||
|
///
|
||||||
|
/// A lower number denotes a higher priority.
|
||||||
|
/// <https://cache.nixos.org> has a priority of 40.
|
||||||
|
#[clap(long)]
|
||||||
|
priority: Option<i32>,
|
||||||
|
|
||||||
|
/// The signing key name of an upstream cache.
|
||||||
|
///
|
||||||
|
/// When pushing to the cache, paths signed with this key
|
||||||
|
/// will be skipped by default. Specify this flag multiple
|
||||||
|
/// times to add multiple key names.
|
||||||
|
#[clap(value_name = "NAME", long = "upstream-cache-key-name")]
|
||||||
|
upstream_cache_key_names: Option<Vec<String>>,
|
||||||
|
|
||||||
|
/// Set the retention period of the cache.
|
||||||
|
///
|
||||||
|
/// You can use expressions like "2 years", "3 months"
|
||||||
|
/// and "1y".
|
||||||
|
#[clap(long, value_name = "PERIOD")]
|
||||||
|
retention_period: Option<Duration>,
|
||||||
|
|
||||||
|
/// Reset the retention period of the cache to global default.
|
||||||
|
#[clap(long)]
|
||||||
|
reset_retention_period: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Destroy a cache.
|
||||||
|
///
|
||||||
|
/// Destroying a cache causes it to become unavailable but the
|
||||||
|
/// underlying data may not be deleted immediately. Depending
|
||||||
|
/// on the server configuration, you may or may not be able to
|
||||||
|
/// create the cache of the same name.
|
||||||
|
///
|
||||||
|
/// You need the `destroy_cache` permission on the cache that
|
||||||
|
/// you are destroying.
#[derive(Debug, Clone, Parser)]
struct Destroy {
    /// Name of the cache to destroy.
    cache: CacheRef,

    /// Don't ask for interactive confirmation.
    #[clap(long)]
    no_confirm: bool,
}

/// Show the current configuration of a cache.
#[derive(Debug, Clone, Parser)]
struct Info {
    /// Name of the cache to query.
    cache: CacheRef,
}

pub async fn run(opts: Opts) -> Result<()> {
    let sub = opts.command.as_cache().unwrap();
    match &sub.command {
        Command::Create(sub) => create_cache(sub.to_owned()).await,
        Command::Configure(sub) => configure_cache(sub.to_owned()).await,
        Command::Destroy(sub) => destroy_cache(sub.to_owned()).await,
        Command::Info(sub) => show_cache_config(sub.to_owned()).await,
    }
}

async fn create_cache(sub: Create) -> Result<()> {
    let config = Config::load()?;

    let (server_name, server, cache) = config.resolve_cache(&sub.cache)?;
    let api = ApiClient::from_server_config(server.clone())?;

    let request = CreateCacheRequest {
        // TODO: Make this configurable?
        keypair: KeypairConfig::Generate,
        is_public: sub.public,
        priority: sub.priority,
        store_dir: sub.store_dir,
        upstream_cache_key_names: sub.upstream_cache_key_names,
    };

    api.create_cache(cache, request).await?;
    eprintln!(
        "✨ Created cache \"{}\" on \"{}\"",
        cache.as_str(),
        server_name.as_str()
    );

    Ok(())
}

async fn configure_cache(sub: Configure) -> Result<()> {
    let config = Config::load()?;

    let (server_name, server, cache) = config.resolve_cache(&sub.cache)?;
    let mut patch = CacheConfig::blank();

    if sub.public && sub.private {
        return Err(anyhow!(
            "`--public` and `--private` cannot be set at the same time."
        ));
    }

    if sub.retention_period.is_some() && sub.reset_retention_period {
        return Err(anyhow!(
            "`--retention-period` and `--reset-retention-period` cannot be set at the same time."
        ));
    }

    if sub.public {
        patch.is_public = Some(true);
    } else if sub.private {
        patch.is_public = Some(false);
    }

    if let Some(period) = sub.retention_period {
        patch.retention_period = Some(RetentionPeriodConfig::Period(period.as_secs() as u32));
    } else if sub.reset_retention_period {
        // Only reset to the global default when explicitly requested.
        // An unconditional `else` here would silently reset the retention
        // period on every reconfiguration.
        patch.retention_period = Some(RetentionPeriodConfig::Global);
    }

    if sub.regenerate_keypair {
        patch.keypair = Some(KeypairConfig::Generate);
    }

    patch.store_dir = sub.store_dir;
    patch.priority = sub.priority;
    patch.upstream_cache_key_names = sub.upstream_cache_key_names;

    let api = ApiClient::from_server_config(server.clone())?;
    api.configure_cache(cache, &patch).await?;

    eprintln!(
        "✅ Configured \"{}\" on \"{}\"",
        cache.as_str(),
        server_name.as_str()
    );

    Ok(())
}

async fn destroy_cache(sub: Destroy) -> Result<()> {
    let config = Config::load()?;

    let (server_name, server, cache) = config.resolve_cache(&sub.cache)?;

    if !sub.no_confirm {
        eprintln!("When you destroy a cache:");
        eprintln!();
        eprintln!("1. Everyone will immediately lose access.");
        eprintln!("2. The underlying data won't be deleted immediately.");
        eprintln!("3. You may not be able to create a cache of the same name.");
        eprintln!();

        let answer: String = Input::new()
            .with_prompt(format!(
                "⚠️ Type the cache name to confirm destroying \"{}\" on \"{}\"",
                cache.as_str(),
                server_name.as_str()
            ))
            .allow_empty(true)
            .interact()?;

        if answer != cache.as_str() {
            return Err(anyhow!("Incorrect answer. Aborting..."));
        }
    }

    let api = ApiClient::from_server_config(server.clone())?;
    api.destroy_cache(cache).await?;

    eprintln!("🗑️ The cache was destroyed.");

    Ok(())
}

async fn show_cache_config(sub: Info) -> Result<()> {
    let config = Config::load()?;

    let (_, server, cache) = config.resolve_cache(&sub.cache)?;
    let api = ApiClient::from_server_config(server.clone())?;
    let cache_config = api.get_cache_config(cache).await?;

    if let Some(is_public) = cache_config.is_public {
        eprintln!("               Public: {}", is_public);
    }

    if let Some(public_key) = cache_config.public_key {
        eprintln!("           Public Key: {}", public_key);
    }

    if let Some(substituter_endpoint) = cache_config.substituter_endpoint {
        eprintln!("Binary Cache Endpoint: {}", substituter_endpoint);
    }

    if let Some(api_endpoint) = cache_config.api_endpoint {
        eprintln!("         API Endpoint: {}", api_endpoint);
    }

    if let Some(store_dir) = cache_config.store_dir {
        eprintln!("      Store Directory: {}", store_dir);
    }

    if let Some(priority) = cache_config.priority {
        eprintln!("             Priority: {}", priority);
    }

    if let Some(upstream_cache_key_names) = cache_config.upstream_cache_key_names {
        eprintln!("  Upstream Cache Keys: {:?}", upstream_cache_key_names);
    }

    if let Some(retention_period) = cache_config.retention_period {
        match retention_period {
            RetentionPeriodConfig::Period(period) => {
                eprintln!("     Retention Period: {:?}", period);
            }
            RetentionPeriodConfig::Global => {
                eprintln!("     Retention Period: Global Default");
            }
        }
    }

    Ok(())
}

31
client/src/command/get_closure.rs
Normal file
@@ -0,0 +1,31 @@
use std::path::PathBuf;

use anyhow::Result;
use clap::Parser;

use crate::cli::Opts;
use attic::nix_store::NixStore;

/// Returns the closure of a store path (test).
///
/// This is similar to `nix-store -qR`.
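///
/// Illustrative usage (the store path is a placeholder):
///
/// ```text
/// attic get-closure /nix/store/<hash>-hello-2.12
/// ```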
#[derive(Debug, Parser)]
pub struct GetClosure {
    store_path: PathBuf,
}

pub async fn run(opts: Opts) -> Result<()> {
    let sub = opts.command.as_get_closure().unwrap();

    let store = NixStore::connect()?;
    let store_path = store.follow_store_path(&sub.store_path)?;
    let closure = store
        .compute_fs_closure(store_path, false, false, false)
        .await?;

    for path in &closure {
        println!("{}", store.get_full_path(path).to_str().unwrap());
    }

    Ok(())
}

48
client/src/command/login.rs
Normal file
@@ -0,0 +1,48 @@
use anyhow::Result;
use clap::Parser;

use crate::cache::ServerName;
use crate::cli::Opts;
use crate::config::{Config, ServerConfig};

/// Log into an Attic server.
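///
/// Illustrative usage (endpoint and token are placeholders):
///
/// ```text
/// attic login main https://attic.example.com/ <token>
/// ```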
#[derive(Debug, Parser)]
pub struct Login {
    /// Name of the server.
    name: ServerName,

    /// Endpoint of the server.
    endpoint: String,

    /// Access token.
    token: Option<String>,
}

pub async fn run(opts: Opts) -> Result<()> {
    let sub = opts.command.as_login().unwrap();
    let mut config = Config::load()?;
    let mut config_m = config.as_mut();

    if let Some(server) = config_m.servers.get_mut(&sub.name) {
        eprintln!("✍️ Overwriting server \"{}\"", sub.name.as_str());

        server.endpoint = sub.endpoint.to_owned();
        server.token = sub.token.to_owned();
    } else {
        eprintln!("✍️ Configuring server \"{}\"", sub.name.as_str());

        config_m.servers.insert(
            sub.name.to_owned(),
            ServerConfig {
                endpoint: sub.endpoint.to_owned(),
                token: sub.token.to_owned(),
            },
        );
    }

    if config_m.servers.len() == 1 {
        config_m.default_server = Some(sub.name.to_owned());
    }

    Ok(())
}

5
client/src/command/mod.rs
Normal file
@@ -0,0 +1,5 @@
pub mod cache;
pub mod get_closure;
pub mod login;
pub mod push;
pub mod r#use;

355
client/src/command/push.rs
Normal file
@@ -0,0 +1,355 @@
use std::collections::{HashMap, HashSet};
use std::fmt::Write;
use std::path::PathBuf;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use std::time::{Duration, Instant};

use anyhow::{anyhow, Result};
use clap::Parser;
use futures::future::join_all;
use futures::stream::Stream;
use indicatif::{HumanBytes, MultiProgress, ProgressBar, ProgressState, ProgressStyle};
use tokio::sync::Semaphore;

use crate::api::ApiClient;
use crate::cache::{CacheName, CacheRef};
use crate::cli::Opts;
use crate::config::Config;
use attic::api::v1::upload_path::UploadPathNarInfo;
use attic::error::AtticResult;
use attic::nix_store::{NixStore, StorePath, StorePathHash, ValidPathInfo};

/// Push closures to a binary cache.
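///
/// Illustrative usage (cache and path are placeholders):
///
/// ```text
/// attic push main:my-cache ./result
/// ```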
#[derive(Debug, Parser)]
pub struct Push {
    /// The cache to push to.
    cache: CacheRef,

    /// The store paths to push.
    paths: Vec<PathBuf>,

    /// Push the specified paths only and do not compute closures.
    #[clap(long)]
    no_closure: bool,

    /// Ignore the upstream cache filter.
    #[clap(long)]
    ignore_upstream_cache_filter: bool,
}

struct PushPlan {
    /// Store paths to push.
    store_path_map: HashMap<StorePathHash, ValidPathInfo>,

    /// The number of paths in the original full closure.
    num_all_paths: usize,

    /// Number of paths that have been filtered out because they are already cached.
    num_already_cached: usize,

    /// Number of paths that have been filtered out because they are signed by an upstream cache.
    num_upstream: usize,
}

/// Wrapper to update a progress bar as a NAR is streamed.
struct NarStreamProgress<S> {
    stream: S,
    bar: ProgressBar,
}

/// Uploads a single path to a cache.
pub async fn upload_path(
    store: Arc<NixStore>,
    path_info: ValidPathInfo,
    api: ApiClient,
    cache: &CacheName,
    mp: MultiProgress,
) -> Result<()> {
    let path = &path_info.path;
    let upload_info = {
        let full_path = store
            .get_full_path(path)
            .to_str()
            .ok_or_else(|| anyhow!("Path contains non-UTF-8"))?
            .to_string();

        let references = path_info
            .references
            .into_iter()
            .map(|pb| {
                pb.to_str()
                    .ok_or_else(|| anyhow!("Reference contains non-UTF-8"))
                    .map(|s| s.to_owned())
            })
            .collect::<Result<Vec<String>, anyhow::Error>>()?;

        UploadPathNarInfo {
            cache: cache.to_owned(),
            store_path_hash: path.to_hash(),
            store_path: full_path,
            references,
            system: None, // TODO
            deriver: None, // TODO
            sigs: path_info.sigs,
            ca: path_info.ca,
            nar_hash: path_info.nar_hash.to_owned(),
            nar_size: path_info.nar_size as usize,
        }
    };

    let template = format!(
        "{{spinner}} {: <20.20} {{bar:40.green/blue}} {{human_bytes:10}} ({{average_speed}})",
        path.name(),
    );
    let style = ProgressStyle::with_template(&template)
        .unwrap()
        .tick_chars("🕛🕐🕑🕒🕓🕔🕕🕖🕗🕘🕙🕚✅")
        .progress_chars("██ ")
        .with_key("human_bytes", |state: &ProgressState, w: &mut dyn Write| {
            write!(w, "{}", HumanBytes(state.pos())).unwrap();
        })
        // Adapted from
        // <https://github.com/console-rs/indicatif/issues/394#issuecomment-1309971049>
        .with_key(
            "average_speed",
            |state: &ProgressState, w: &mut dyn Write| match (state.pos(), state.elapsed()) {
                (pos, elapsed) if elapsed > Duration::ZERO => {
                    write!(w, "{}", average_speed(pos, elapsed)).unwrap();
                }
                _ => write!(w, "-").unwrap(),
            },
        );
    let bar = mp.add(ProgressBar::new(path_info.nar_size));
    bar.set_style(style);
    let nar_stream = NarStreamProgress::new(store.nar_from_path(path.to_owned()), bar.clone());

    let start = Instant::now();
    match api.upload_path(upload_info, nar_stream).await {
        Ok(_) => {
            let elapsed = start.elapsed();
            let seconds = elapsed.as_secs_f64();
            let speed = (path_info.nar_size as f64 / seconds) as u64;

            mp.suspend(|| {
                eprintln!(
                    "✅ {} ({}/s)",
                    path.as_os_str().to_string_lossy(),
                    HumanBytes(speed)
                );
            });
            bar.finish_and_clear();
            Ok(())
        }
        Err(e) => {
            mp.suspend(|| {
                eprintln!("❌ {}: {}", path.as_os_str().to_string_lossy(), e);
            });
            bar.finish_and_clear();
            Err(e)
        }
    }
}

pub async fn run(opts: Opts) -> Result<()> {
    let sub = opts.command.as_push().unwrap();
    let config = Config::load()?;

    let store = Arc::new(NixStore::connect()?);
    let roots = sub
        .paths
        .clone()
        .into_iter()
        .map(|p| store.follow_store_path(&p))
        .collect::<std::result::Result<Vec<_>, _>>()?;

    let (server_name, server, cache) = config.resolve_cache(&sub.cache)?;

    let api = ApiClient::from_server_config(server.clone())?;
    let plan = PushPlan::plan(
        store.clone(),
        &api,
        cache,
        roots,
        sub.no_closure,
        sub.ignore_upstream_cache_filter,
    )
    .await?;

    if plan.store_path_map.is_empty() {
        if plan.num_all_paths == 0 {
            eprintln!("🤷 Nothing selected.");
        } else {
            eprintln!(
                "✅ All done! ({num_already_cached} already cached, {num_upstream} in upstream)",
                num_already_cached = plan.num_already_cached,
                num_upstream = plan.num_upstream,
            );
        }

        return Ok(());
    } else {
        eprintln!("⚙️ Pushing {num_missing_paths} paths to \"{cache}\" on \"{server}\" ({num_already_cached} already cached, {num_upstream} in upstream)...",
            cache = cache.as_str(),
            server = server_name.as_str(),
            num_missing_paths = plan.store_path_map.len(),
            num_already_cached = plan.num_already_cached,
            num_upstream = plan.num_upstream,
        );
    }

    let mp = MultiProgress::new();
    let upload_limit = Arc::new(Semaphore::new(10)); // FIXME
    let futures = plan
        .store_path_map
        .into_iter()
        .map(|(_, path_info)| {
            let store = store.clone();
            let api = api.clone();
            let mp = mp.clone();
            let upload_limit = upload_limit.clone();

            async move {
                let permit = upload_limit.acquire().await?;

                upload_path(store.clone(), path_info, api, cache, mp.clone()).await?;

                drop(permit);
                Ok::<(), anyhow::Error>(())
            }
        })
        .collect::<Vec<_>>();

    futures::future::join_all(futures)
        .await
        .into_iter()
        .collect::<Result<Vec<()>>>()?;

    Ok(())
}

impl PushPlan {
    /// Creates a plan.
    async fn plan(
        store: Arc<NixStore>,
        api: &ApiClient,
        cache: &CacheName,
        roots: Vec<StorePath>,
        no_closure: bool,
        ignore_upstream_filter: bool,
    ) -> Result<Self> {
        // Compute closure
        let closure = if no_closure {
            roots
        } else {
            store
                .compute_fs_closure_multi(roots, false, false, false)
                .await?
        };

        let mut store_path_map: HashMap<StorePathHash, ValidPathInfo> = {
            let futures = closure
                .iter()
                .map(|path| {
                    let store = store.clone();
                    let path = path.clone();
                    let path_hash = path.to_hash();

                    async move {
                        let path_info = store.query_path_info(path).await?;
                        Ok((path_hash, path_info))
                    }
                })
                .collect::<Vec<_>>();

            join_all(futures).await.into_iter().collect::<Result<_>>()?
        };

        let num_all_paths = store_path_map.len();
        if store_path_map.is_empty() {
            return Ok(Self {
                store_path_map,
                num_all_paths,
                num_already_cached: 0,
                num_upstream: 0,
            });
        }

        // Confirm remote cache validity, query cache config
        let cache_config = api.get_cache_config(cache).await?;

        if !ignore_upstream_filter {
            // Filter out paths signed by upstream caches
            let upstream_cache_key_names =
                cache_config.upstream_cache_key_names.unwrap_or_default();
            store_path_map.retain(|_, pi| {
                for sig in &pi.sigs {
                    if let Some((name, _)) = sig.split_once(':') {
                        if upstream_cache_key_names.iter().any(|u| name == u) {
                            return false;
                        }
                    }
                }

                true
            });
        }

        let num_filtered_paths = store_path_map.len();
        if store_path_map.is_empty() {
            return Ok(Self {
                store_path_map,
                num_all_paths,
                num_already_cached: 0,
                num_upstream: num_all_paths - num_filtered_paths,
            });
        }

        // Query missing paths
        let missing_path_hashes: HashSet<StorePathHash> = {
            let store_path_hashes = store_path_map.keys().map(|sph| sph.to_owned()).collect();
            let res = api.get_missing_paths(cache, store_path_hashes).await?;
            res.missing_paths.into_iter().collect()
        };
        store_path_map.retain(|sph, _| missing_path_hashes.contains(sph));
        let num_missing_paths = store_path_map.len();
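
        // At this point (used for the summary printed by `run`):
        //   num_already_cached = num_filtered_paths - num_missing_paths
        //   num_upstream       = num_all_paths - num_filtered_paths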
        Ok(Self {
            store_path_map,
            num_all_paths,
            num_already_cached: num_filtered_paths - num_missing_paths,
            num_upstream: num_all_paths - num_filtered_paths,
        })
    }
}

impl<S: Stream<Item = AtticResult<Vec<u8>>>> NarStreamProgress<S> {
    fn new(stream: S, bar: ProgressBar) -> Self {
        Self { stream, bar }
    }
}

impl<S: Stream<Item = AtticResult<Vec<u8>>> + Unpin> Stream for NarStreamProgress<S> {
    type Item = AtticResult<Vec<u8>>;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        match Pin::new(&mut self.stream).as_mut().poll_next(cx) {
            Poll::Ready(Some(data)) => {
                if let Ok(data) = &data {
                    self.bar.inc(data.len() as u64);
                }

                Poll::Ready(Some(data))
            }
            other => other,
        }
    }
}

// Just the average, no fancy sliding windows that cause wild fluctuations
// <https://github.com/console-rs/indicatif/issues/394>
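// Illustrative example: 10 MiB transferred over 2 seconds works out to
// 5242880 B/s, so this returns roughly "5 MiB/s" (the exact string
// depends on how indicatif's HumanBytes formats the number).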
fn average_speed(bytes: u64, duration: Duration) -> String {
    let speed = bytes as f64 * 1000_f64 / duration.as_millis() as f64;
    format!("{}/s", HumanBytes(speed as u64))
}

68
client/src/command/use.rs
Normal file
@@ -0,0 +1,68 @@
use anyhow::{anyhow, Result};
use clap::Parser;
use reqwest::Url;

use crate::api::ApiClient;
use crate::cache::CacheRef;
use crate::cli::Opts;
use crate::config::Config;
use crate::nix_config::NixConfig;
use crate::nix_netrc::NixNetrc;

/// Configure Nix to use a binary cache.
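///
/// Illustrative usage (assuming a server named `main` with a cache
/// named `my-cache`):
///
/// ```text
/// attic use main:my-cache
/// ```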
#[derive(Debug, Parser)]
pub struct Use {
    /// The cache to configure.
    cache: CacheRef,
}

pub async fn run(opts: Opts) -> Result<()> {
    let sub = opts.command.as_use().unwrap();
    let config = Config::load()?;

    let (server_name, server, cache) = config.resolve_cache(&sub.cache)?;

    let api = ApiClient::from_server_config(server.clone())?;
    let cache_config = api.get_cache_config(cache).await?;

    let substituter = cache_config
        .substituter_endpoint
        .ok_or_else(|| anyhow!("The server did not tell us where the binary cache endpoint is."))?;
    let public_key = cache_config.public_key
        .ok_or_else(|| anyhow!("The server did not tell us which public key it uses. Is signing managed by the client?"))?;

    eprintln!(
        "Configuring Nix to use \"{cache}\" on \"{server_name}\":",
        cache = cache.as_str(),
        server_name = server_name.as_str(),
    );

    // Modify nix.conf
    eprintln!("+ Substituter: {}", substituter);
    eprintln!("+ Trusted Public Key: {}", public_key);

    let mut nix_config = NixConfig::load().await?;
    nix_config.add_substituter(&substituter);
    nix_config.add_trusted_public_key(&public_key);

    // Modify netrc
    if let Some(token) = &server.token {
        eprintln!("+ Access Token");

        let mut nix_netrc = NixNetrc::load().await?;
        let host = Url::parse(&server.endpoint)?
            .host()
            .map(|h| h.to_string())
            .unwrap();
        nix_netrc.add_token(host, token.to_string());
        nix_netrc.save().await?;

        let netrc_path = nix_netrc.path().unwrap().to_str().unwrap();

        nix_config.set_netrc_file(netrc_path);
    }

    nix_config.save().await?;

    Ok(())
}

182
client/src/config.rs
Normal file
@@ -0,0 +1,182 @@
//! Client configurations.
//!
//! Configuration files are stored under `$XDG_CONFIG_HOME/attic/config.toml`.
//! We automatically write modified configurations back for a good end-user
//! experience (e.g., `attic login`).
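//!
//! An illustrative `config.toml` (all values are placeholders):
//!
//! ```toml
//! default-server = "main"
//!
//! [servers.main]
//! endpoint = "https://attic.example.com/"
//! token = "..."
//! ```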
use std::collections::HashMap;
use std::fs;
use std::ops::{Deref, DerefMut};
use std::path::PathBuf;

use anyhow::{anyhow, Result};
use serde::{Deserialize, Serialize};
use xdg::BaseDirectories;

use crate::cache::{CacheName, CacheRef, ServerName};

/// Application prefix in XDG base directories.
///
/// This will be concatenated into `$XDG_CONFIG_HOME/attic`.
const XDG_PREFIX: &str = "attic";

/// Configuration loader.
#[derive(Debug)]
pub struct Config {
    /// Actual configuration data.
    data: ConfigData,

    /// Path to write modified configurations back to.
    path: Option<PathBuf>,
}

/// Client configurations.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct ConfigData {
    /// The default server to connect to.
    #[serde(rename = "default-server")]
    pub default_server: Option<ServerName>,

    /// A set of remote servers and access credentials.
    #[serde(default = "HashMap::new")]
    #[serde(skip_serializing_if = "HashMap::is_empty")]
    pub servers: HashMap<ServerName, ServerConfig>,
}

/// Configuration of a server.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct ServerConfig {
    pub endpoint: String,
    pub token: Option<String>,
}

/// Wrapper that automatically saves the config once dropped.
pub struct ConfigWriteGuard<'a>(&'a mut Config);

impl Config {
    /// Loads the configuration from the system.
    pub fn load() -> Result<Self> {
        let path = get_config_path()
            .map_err(|e| {
                tracing::warn!("Could not get config path: {}", e);
                e
            })
            .ok();

        let data = ConfigData::load_from_path(path.as_ref())?;

        Ok(Self { data, path })
    }

    /// Returns a mutable reference to the configuration.
    pub fn as_mut(&mut self) -> ConfigWriteGuard {
        ConfigWriteGuard(self)
    }

    /// Saves the configuration back to the system, if possible.
    pub fn save(&self) -> Result<()> {
        if let Some(path) = &self.path {
            let serialized = toml::to_string(&self.data)?;
            fs::write(path, serialized.as_bytes())?;

            tracing::debug!("Saved modified configuration to {:?}", path);
        }

        Ok(())
    }
}

impl Deref for Config {
    type Target = ConfigData;

    fn deref(&self) -> &Self::Target {
        &self.data
    }
}

impl ConfigData {
    fn load_from_path(path: Option<&PathBuf>) -> Result<Self> {
        if let Some(path) = path {
            if path.exists() {
                let contents = fs::read(path)?;
                let data = toml::from_slice(&contents)?;
                return Ok(data);
            }
        }

        Ok(ConfigData::default())
    }

    pub fn default_server(&self) -> Result<(&ServerName, &ServerConfig)> {
        if let Some(name) = &self.default_server {
            let config = self.servers.get(name).ok_or_else(|| {
                anyhow!(
                    "Configured default server \"{}\" does not exist",
                    name.as_str()
                )
            })?;
            Ok((name, config))
        } else if let Some((name, config)) = self.servers.iter().next() {
            Ok((name, config))
        } else {
            Err(anyhow!("No servers are available."))
        }
    }

    pub fn resolve_cache<'a>(
        &'a self,
        r: &'a CacheRef,
    ) -> Result<(&'a ServerName, &'a ServerConfig, &'a CacheName)> {
        match r {
            CacheRef::DefaultServer(cache) => {
                let (name, config) = self.default_server()?;
                Ok((name, config, cache))
            }
            CacheRef::ServerQualified(server, cache) => {
                let config = self
                    .servers
                    .get(server)
                    .ok_or_else(|| anyhow!("Server \"{}\" does not exist", server.as_str()))?;
                Ok((server, config, cache))
            }
        }
    }
}

impl Default for ConfigData {
    fn default() -> Self {
        Self {
            default_server: None,
            servers: HashMap::new(),
        }
    }
}

impl<'a> Deref for ConfigWriteGuard<'a> {
    type Target = ConfigData;

    fn deref(&self) -> &Self::Target {
        &self.0.data
    }
}

impl<'a> DerefMut for ConfigWriteGuard<'a> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0.data
    }
}

impl<'a> Drop for ConfigWriteGuard<'a> {
    fn drop(&mut self) {
        if let Err(e) = self.0.save() {
            tracing::error!("Could not save modified configuration: {}", e);
        }
    }
}

fn get_config_path() -> Result<PathBuf> {
    let xdg_dirs = BaseDirectories::with_prefix(XDG_PREFIX)?;
    let config_path = xdg_dirs.place_config_file("config.toml")?;

    Ok(config_path)
}

36
client/src/main.rs
Normal file
@@ -0,0 +1,36 @@
#![deny(
    asm_sub_register,
    deprecated,
    missing_abi,
    unsafe_code,
    unused_macros,
    unused_must_use,
    unused_unsafe
)]
#![deny(clippy::from_over_into, clippy::needless_question_mark)]
#![cfg_attr(
    not(debug_assertions),
    deny(unused_imports, unused_mut, unused_variables,)
)]

mod api;
mod cache;
mod cli;
mod command;
mod config;
mod nix_config;
mod nix_netrc;
mod version;

use anyhow::Result;

#[tokio::main]
async fn main() -> Result<()> {
    init_logging()?;
    cli::run().await
}

fn init_logging() -> Result<()> {
    tracing_subscriber::fmt::init();
    Ok(())
}

265
client/src/nix_config.rs
Normal file
@@ -0,0 +1,265 @@
//! Nix configuration files.
//!
//! We automatically edit the user's `nix.conf` to add new
//! binary caches while trying to keep the formatting intact.
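//!
//! For example, `attic use` ends up prepending to lists like these
//! (the endpoint and key below are placeholders):
//!
//! ```text
//! substituters = https://attic.example.com/my-cache https://cache.nixos.org
//! trusted-public-keys = my-cache:<key> cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=
//! ```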
use std::path::PathBuf;

use anyhow::{anyhow, Result};
use lazy_static::lazy_static;
use regex::Regex;
use tokio::fs;
use xdg::BaseDirectories;

lazy_static! {
    static ref COMMENT_LINE: Regex = {
        Regex::new(r"^\s*(#.*)?$").unwrap()
    };

    static ref KV_LINE: Regex = {
        // I know what you are thinking, but...
        // `key=val` is not valid, and `🔥🔥🔥very=WILD=key🔥🔥🔥 = value` is perfectly valid :)
        // Also, despite syntax highlighting of some editors, backslashes do _not_ escape the comment character.
        Regex::new(r"^(?P<whitespace_s>\s*)(?P<key>[^\s]+)(?P<whitespace_l>\s+)=(?P<whitespace_r>\s+)(?P<value>[^#]+)(?P<comment>#.*)?$").unwrap()
    };
}

/// The server of cache.nixos.org.
const CACHE_NIXOS_ORG_SUBSTITUTER: &str = "https://cache.nixos.org";

/// The public key of cache.nixos.org.
const CACHE_NIXOS_ORG_KEY: &str = "cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=";

#[derive(Debug)]
pub struct NixConfig {
    /// Path to write the modified configuration back to.
    path: Option<PathBuf>,

    /// Configuration lines.
    lines: Vec<Line>,
}

/// A line in the configuration.
#[derive(Debug, Clone, PartialEq, Eq)]
enum Line {
    Comment(String),
    KV {
        key: String,
        value: String,
        whitespace_s: String,
        whitespace_l: String,
        whitespace_r: String,
        comment: Option<String>,
    },
}

impl NixConfig {
    pub async fn load() -> Result<Self> {
        let nix_base = BaseDirectories::with_prefix("nix")?;
        let path = nix_base.place_config_file("nix.conf")?;

        let lines = if path.exists() {
            let content = fs::read_to_string(&path).await?;
            Line::from_lines(&content)?
        } else {
            Vec::new()
        };

        Ok(Self {
            path: Some(path),
            lines,
        })
    }

    /// Saves the modified configuration file.
    pub async fn save(&self) -> Result<()> {
        if let Some(path) = &self.path {
            fs::write(path, self.to_string()).await?;
            Ok(())
        } else {
            Err(anyhow!("Don't know how to save the nix.conf"))
        }
    }

    /// Reserializes the configuration back to a string.
    pub fn to_string(&self) -> String {
        self.lines
            .iter()
            .map(|l| l.to_string())
            .collect::<Vec<_>>()
            .join("\n")
    }

    /// Adds a new substituter.
    pub fn add_substituter(&mut self, substituter: &str) {
        self.prepend_to_list("substituters", substituter, CACHE_NIXOS_ORG_SUBSTITUTER);
    }

    /// Adds a new trusted public key.
    pub fn add_trusted_public_key(&mut self, public_key: &str) {
        self.prepend_to_list("trusted-public-keys", public_key, CACHE_NIXOS_ORG_KEY);
    }

    /// Sets the netrc-file config.
    pub fn set_netrc_file(&mut self, path: &str) {
        if let Some(kv) = self.find_key("netrc-file") {
            if let Line::KV { ref mut value, .. } = kv {
                *value = path.to_string();
            }
        } else {
            self.lines
                .push(Line::kv("netrc-file".to_string(), path.to_string()));
        }
    }

    fn prepend_to_list(&mut self, key: &str, value: &str, default_tail: &str) {
        if let Some(kv) = self.find_key(key) {
            if let Line::KV {
                value: ref mut list,
                ..
            } = kv
            {
                if !list.split(' ').any(|el| el == value) {
                    *list = format!("{value} {list}");
                }
                return;
            }
            unreachable!();
        } else {
            let list = format!("{value} {default_tail}");
            self.lines.push(Line::kv(key.to_string(), list));
        }
    }

    fn find_key(&mut self, key: &str) -> Option<&mut Line> {
        self.lines.iter_mut().find(|l| {
            if let Line::KV { key: k, .. } = l {
                k == key
            } else {
                false
            }
        })
    }
}

impl Line {
    fn from_lines(s: &str) -> Result<Vec<Self>> {
        let mut lines: Vec<Line> = Vec::new();

        for line in s.lines() {
            lines.push(Line::from_str(line)?);
        }

        Ok(lines)
    }

    fn from_str(line: &str) -> Result<Self> {
        if COMMENT_LINE.is_match(line) {
            return Ok(Self::Comment(line.to_string()));
        }

        if let Some(matches) = KV_LINE.captures(line) {
            return Ok(Self::KV {
                key: matches.name("key").unwrap().as_str().to_owned(),
                value: matches.name("value").unwrap().as_str().to_owned(),
                whitespace_s: matches.name("whitespace_s").unwrap().as_str().to_owned(),
                whitespace_l: matches.name("whitespace_l").unwrap().as_str().to_owned(),
                whitespace_r: matches.name("whitespace_r").unwrap().as_str().to_owned(),
                comment: matches.name("comment").map(|s| s.as_str().to_owned()),
            });
        }

        Err(anyhow!("Line \"{}\" isn't valid", line))
    }

    fn to_string(&self) -> String {
        match self {
            Self::Comment(l) => l.clone(),
            Self::KV {
                key,
                value,
                whitespace_s,
                whitespace_l,
                whitespace_r,
                comment,
            } => {
                let cmt = comment.as_deref().unwrap_or("");
                format!("{whitespace_s}{key}{whitespace_l}={whitespace_r}{value}{cmt}")
            }
        }
    }

    fn kv(key: String, value: String) -> Self {
        Self::KV {
            key,
            value,
            whitespace_s: String::new(),
            whitespace_l: " ".to_string(),
            whitespace_r: " ".to_string(),
            comment: None,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_nix_config_parse_line() {
        assert_eq!(
            Line::from_str("# some comment").unwrap(),
            Line::Comment("# some comment".to_string()),
        );

        assert_eq!(
            Line::from_str("    # some indented comment").unwrap(),
            Line::Comment("    # some indented comment".to_string()),
        );

        assert_eq!(
            Line::from_str("   ").unwrap(),
            Line::Comment("   ".to_string()),
        );

        assert_eq!(
            Line::from_str("key = value").unwrap(),
            Line::KV {
                key: "key".to_string(),
                value: "value".to_string(),
                whitespace_s: "".to_string(),
                whitespace_l: " ".to_string(),
                whitespace_r: " ".to_string(),
                comment: None,
            }
        );

        // The input must contain the tab and space characters asserted
        // below (the extraction of this listing collapsed them).
        assert_eq!(
            Line::from_str("\t 🔥🔥🔥very=WILD=key🔥🔥🔥 =\tvalue = #comment").unwrap(),
            Line::KV {
                key: "🔥🔥🔥very=WILD=key🔥🔥🔥".to_string(),
                value: "value = ".to_string(),
                whitespace_s: "\t ".to_string(),
                whitespace_l: " ".to_string(),
                whitespace_r: "\t".to_string(),
                comment: Some("#comment".to_string()),
            }
        );
    }

    #[test]
    fn test_nix_config_line_roundtrip() {
        let cases = [
            "# some comment",
            "    # some indented comment",
            "   ",
            "key = value",
            "\t 🔥🔥🔥very=WILD=key🔥🔥🔥 =\tvalue = #comment",
        ];

        for case in cases {
            let line = Line::from_str(case).unwrap();
            assert_eq!(case, line.to_string());
        }
    }
}

248
client/src/nix_netrc.rs
Normal file
@@ -0,0 +1,248 @@
//! Nix netrc files.
//!
//! We automatically edit the user's `netrc` to add cache server
//! tokens.
//!
//! This is a very naive implementation. The whole thing should be
//! refactored to be cleaner and operate on streams.
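//!
//! The subset of the format we care about looks like this
//! (machine name and token are placeholders):
//!
//! ```text
//! machine attic.example.com
//! password <token>
//! ```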
use std::collections::HashMap;
use std::fmt;
use std::path::{Path, PathBuf};

use anyhow::{anyhow, Result};
use tokio::fs;
use xdg::BaseDirectories;

#[derive(Debug)]
pub struct NixNetrc {
    /// Path to write the modified netrc back to.
    path: Option<PathBuf>,

    /// Machines in the netrc file.
    machines: HashMap<String, Machine>,
}

#[derive(Debug, PartialEq, Eq)]
struct Machine {
    /// A password.
    password: Option<String>,

    /// Any other tokens that we must preserve.
    ///
    /// We output in pairs when reserializing. Curl allows the key
    /// and value to be on different lines, but who knows about other
    /// implementations?
    other: Vec<String>,
}

impl NixNetrc {
    pub async fn load() -> Result<Self> {
        let nix_base = BaseDirectories::with_prefix("nix")?;
        let path = nix_base.place_config_file("netrc")?;

        let machines = if path.exists() {
            let content = fs::read_to_string(&path).await?;
            parse_machines(&content)?
        } else {
            HashMap::new()
        };

        Ok(Self {
            path: Some(path),
            machines,
        })
    }

    /// Returns the path to the netrc file.
    pub fn path(&self) -> Option<&Path> {
        self.path.as_deref()
    }

    /// Saves the modified configuration file.
    pub async fn save(&self) -> Result<()> {
        if let Some(path) = &self.path {
            let mut content = String::new();
            serialize_machines(&mut content, &self.machines)?;
            fs::write(path, content).await?;
            Ok(())
        } else {
            Err(anyhow!("Don't know how to save the netrc"))
        }
    }

    /// Adds a token as a password.
    pub fn add_token(&mut self, machine: String, token: String) {
        if let Some(m) = self.machines.get_mut(&machine) {
            m.password = Some(token);
        } else {
            self.machines.insert(
                machine,
                Machine {
                    password: Some(token),
                    other: Vec::new(),
                },
            );
        }
    }
}

fn parse_machines(netrc: &str) -> Result<HashMap<String, Machine>> {
    let mut machines = HashMap::new();
    let mut cur_machine = None;

    let mut cur;
    let mut remaining = netrc;
    while !remaining.is_empty() {
        (cur, remaining) = get_next_token(remaining);

        match cur {
            "" => {
                break;
            }
            "default" => {
                if let Some((name, machine)) = cur_machine {
                    machines.insert(name, machine);
                }

                cur_machine = Some((
                    "".to_string(),
                    Machine {
                        password: None,
                        other: Vec::new(),
                    },
                ));
            }
            "machine" => {
                let (m_name, m_remaining) = get_next_token(remaining);
                remaining = m_remaining;

                if let Some((name, machine)) = cur_machine {
                    machines.insert(name, machine);
                }

                cur_machine = Some((
                    m_name.to_string(),
                    Machine {
                        password: None,
                        other: Vec::new(),
                    },
                ));
            }
            "password" => {
                let (m_password, m_remaining) = get_next_token(remaining);
                remaining = m_remaining;

                if let Some((_, ref mut machine)) = &mut cur_machine {
                    machine.password = Some(m_password.to_string());
                } else {
                    return Err(anyhow!("Password field outside a machine block"));
                }
            }
            tok => {
                if let Some((_, ref mut machine)) = &mut cur_machine {
                    machine.other.push(tok.to_string());
                } else {
                    return Err(anyhow!("Unknown token {} outside a machine block", tok));
                }
            }
        }
    }

    if let Some((name, machine)) = cur_machine {
        machines.insert(name, machine);
    }

    Ok(machines)
}

fn serialize_machines(w: &mut impl fmt::Write, machines: &HashMap<String, Machine>) -> Result<()> {
    for (name, machine) in machines.iter() {
        if name.is_empty() {
            writeln!(w, "default")?;
        } else {
            writeln!(w, "machine {}", name)?;
        }

        if let Some(password) = &machine.password {
            writeln!(w, "password {}", password)?;
        }

        for chunk in machine.other.chunks(2) {
            writeln!(w, "{}", chunk.join(" "))?;
        }
    }

    Ok(())
}

fn get_next_token(s: &str) -> (&str, &str) {
    let s = strip_leading_whitespace(s);
    if let Some(idx) = s.find(|c| c == '\n' || c == ' ' || c == '\t') {
        (&s[..idx], strip_leading_whitespace(&s[idx + 1..]))
    } else {
        (s, "")
    }
}

fn strip_leading_whitespace(s: &str) -> &str {
    if let Some(idx) = s.find(|c| c != '\n' && c != ' ' && c != '\t') {
        &s[idx..]
    } else {
        ""
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_netrc_strip() {
        assert_eq!("", strip_leading_whitespace(" "));
        assert_eq!("a", strip_leading_whitespace(" a"));
        assert_eq!("abc", strip_leading_whitespace(" \t\t\n\nabc"));
        assert_eq!("abc", strip_leading_whitespace("abc"));
    }

    #[test]
    fn test_netrc_tokenization() {
        assert_eq!(("", ""), get_next_token(""));
        assert_eq!(("", ""), get_next_token(" "));
        assert_eq!(("", ""), get_next_token("\n"));
        assert_eq!(("", ""), get_next_token("\t"));

        assert_eq!(("a", ""), get_next_token("a "));
        assert_eq!(("a", ""), get_next_token(" a"));
        assert_eq!(("a", ""), get_next_token(" a "));

        assert_eq!(("abc", ""), get_next_token("abc"));

        assert_eq!(("a", "b"), get_next_token("a b"));
        assert_eq!(("a", "b c"), get_next_token("a b c"));
        assert_eq!(("a", "b\nc"), get_next_token("a\nb\nc"));
        assert_eq!(("a", "b\nc"), get_next_token("a\tb\nc"));

        // Runs of separators collapse before the next token (the
        // multi-space input below was collapsed in the extracted listing).
        assert_eq!(("a", "b c"), get_next_token("a   b c"));
        assert_eq!(("a", "b\nc"), get_next_token("a\n\n\nb\nc"));
        assert_eq!(("a", "b\nc"), get_next_token("a\n\t\nb\nc"));
    }

    #[test]
    fn test_netrc_parse() {
        let machines = parse_machines(
            "default password hunter2 machine localhost login login password 114514",
        )
        .unwrap();
        eprintln!("{:#?}", machines);

        assert_eq!(Some("114514".to_string()), machines["localhost"].password);

        let mut serialized = String::new();
        serialize_machines(&mut serialized, &machines).unwrap();
        eprintln!("{}", serialized);

        let reparse = parse_machines(&serialized).unwrap();
        assert_eq!(machines, reparse);
    }
}

8
client/src/version.rs
Normal file
@@ -0,0 +1,8 @@
/// The distributor of this Attic client.
///
/// Common values include `nixpkgs`, `attic` and `dev`.
pub const ATTIC_DISTRIBUTOR: &str = if let Some(distro) = option_env!("ATTIC_DISTRIBUTOR") {
    distro
} else {
    "unknown"
};

8
default.nix
Normal file
@@ -0,0 +1,8 @@
let
  flake = import ./flake-compat.nix;
in flake.defaultNix.default.overrideAttrs (_: {
  passthru = {
    attic-client = flake.defaultNix.outputs.packages.${builtins.currentSystem}.attic-client;
    demo = flake.defaultNix.outputs.devShells.${builtins.currentSystem}.demo;
  };
})

9
flake-compat.nix
Normal file
@@ -0,0 +1,9 @@
let
  lock = builtins.fromJSON (builtins.readFile ./flake.lock);
  flakeCompat = import (fetchTarball {
    url = "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz";
    sha256 = lock.nodes.flake-compat.locked.narHash;
  });
in flakeCompat {
  src = ./.;
}

60
flake.lock
Normal file
@@ -0,0 +1,60 @@
{
  "nodes": {
    "flake-compat": {
      "flake": false,
      "locked": {
        "lastModified": 1668681692,
        "narHash": "sha256-Ht91NGdewz8IQLtWZ9LCeNXMSXHUss+9COoqu6JLmXU=",
        "owner": "edolstra",
        "repo": "flake-compat",
        "rev": "009399224d5e398d03b22badca40a37ac85412a1",
        "type": "github"
      },
      "original": {
        "owner": "edolstra",
        "repo": "flake-compat",
        "type": "github"
      }
    },
    "nixpkgs": {
      "locked": {
        "lastModified": 1672428209,
        "narHash": "sha256-eejhqkDz2cb2vc5VeaWphJz8UXNuoNoM8/Op8eWv2tQ=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "293a28df6d7ff3dec1e61e37cc4ee6e6c0fb0847",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "nixpkgs-unstable",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "root": {
      "inputs": {
        "flake-compat": "flake-compat",
        "nixpkgs": "nixpkgs",
        "utils": "utils"
      }
    },
    "utils": {
      "locked": {
        "lastModified": 1667395993,
        "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
        "owner": "numtide",
        "repo": "flake-utils",
        "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
        "type": "github"
      },
      "original": {
        "owner": "numtide",
        "repo": "flake-utils",
        "type": "github"
      }
    }
  },
  "root": "root",
  "version": 7
}

91
flake.nix
Normal file
@@ -0,0 +1,91 @@
{
  description = "A Nix binary cache server";

  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
    utils.url = "github:numtide/flake-utils";

    flake-compat = {
      url = "github:edolstra/flake-compat";
      flake = false;
    };
  };

  outputs = { self, nixpkgs, utils, ... }: let
    supportedSystems = utils.lib.defaultSystems;
  in utils.lib.eachSystem supportedSystems (system: let
    pkgs = import nixpkgs { inherit system; };

    inherit (pkgs) lib;
  in rec {
    packages = {
      default = packages.attic;

      attic = pkgs.callPackage ./package.nix { };
      attic-client = packages.attic.override { clientOnly = true; };

      attic-server = let
        attic-server = pkgs.callPackage ./package.nix {
          crates = [ "attic-server" ];
        };
      in attic-server.overrideAttrs (old: {
        pname = "attic-server";

        CARGO_PROFILE_RELEASE_LTO = "fat";
        CARGO_PROFILE_RELEASE_CODEGEN_UNITS = "1";
      });

      attic-server-image = pkgs.dockerTools.buildImage {
        name = "attic-server";
        tag = "main";
        config = {
          Entrypoint = [ "${packages.attic-server}/bin/atticd" ];
          Cmd = [ "--mode" "api-server" ];
          Env = [
            "SSL_CERT_FILE=${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt"
          ];
        };
      };

      book = pkgs.callPackage ./book {
        attic = packages.attic;
      };
    };

    devShells = {
      default = pkgs.mkShell {
        inputsFrom = with packages; [ attic book ];
        nativeBuildInputs = with pkgs; [
          rustfmt clippy
          cargo-expand cargo-outdated cargo-edit

          sqlite-interactive

          editorconfig-checker

          flyctl
        ] ++ (lib.optionals pkgs.stdenv.isLinux [
          linuxPackages.perf
        ]);

        NIX_PATH = "nixpkgs=${pkgs.path}";
        RUST_SRC_PATH = "${pkgs.rustPlatform.rustcSrc}/library";

        ATTIC_DISTRIBUTOR = "dev";
      };

      demo = pkgs.mkShell {
        nativeBuildInputs = [
          packages.default
        ];

        shellHook = ''
          >&2 echo
          >&2 echo '🚀 Run `atticd` to get started!'
          >&2 echo
        '';
      };
    };
    devShell = devShells.default;
  });
}
63
package.nix
Normal file
63
package.nix
Normal file
|
@ -0,0 +1,63 @@
|
{ lib, stdenv, rustPlatform
, pkg-config
, installShellFiles
, nix
, boost
, darwin

# Only build the client
, clientOnly ? false

# Only build certain crates
, crates ? if clientOnly then [ "attic-client" ] else []
}:

let
  ignoredPaths = [ ".github" "target" "book" ];

in rustPlatform.buildRustPackage rec {
  pname = "attic";
  version = "0.1.0";

  src = lib.cleanSourceWith {
    filter = name: type: !(type == "directory" && builtins.elem (baseNameOf name) ignoredPaths);
    src = lib.cleanSource ./.;
  };

  nativeBuildInputs = [
    rustPlatform.bindgenHook
    pkg-config
    installShellFiles
  ];

  buildInputs = [
    nix boost
  ] ++ lib.optionals stdenv.isDarwin (with darwin.apple_sdk.frameworks; [
    SystemConfiguration
  ]);

  cargoHash = "sha256-9gJGY/6m6ao8srnhJ3WzDx35F56lhwZ6t0T3FSn/p7g=";
  cargoBuildFlags = lib.concatMapStrings (c: "-p ${c} ") crates;

  ATTIC_DISTRIBUTOR = "attic";

  # Recursive Nix is not stable yet
  doCheck = false;

  postInstall = lib.optionalString (stdenv.hostPlatform == stdenv.buildPlatform) ''
    if [[ -f $out/bin/attic ]]; then
      installShellCompletion --cmd attic \
        --bash <($out/bin/attic gen-completions bash) \
        --zsh <($out/bin/attic gen-completions zsh) \
        --fish <($out/bin/attic gen-completions fish)
    fi
  '';

  meta = with lib; {
    description = "Multi-tenant Nix binary cache system";
    homepage = "https://github.com/zhaofengli/attic";
    license = licenses.agpl3Plus;
    maintainers = with maintainers; [ zhaofengli ];
    platforms = platforms.linux ++ platforms.darwin;
  };
}
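Note how the two package arguments above compose: `clientOnly = true` collapses `crates` to `[ "attic-client" ]`, and `cargoBuildFlags` turns each entry into a `-p <crate>` flag, so a client-only build compiles just the `attic-client` crate while the default empty list builds the whole workspace.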
96	server/Cargo.toml	Normal file
@@ -0,0 +1,96 @@
[package]
name = "attic-server"
version = "0.1.0"
edition = "2021"
publish = false

[lib]
name = "attic_server"
path = "src/lib.rs"

[[bin]]
name = "atticd"
path = "src/main.rs"
doc = false

[[bin]]
name = "atticadm"
path = "src/adm/main.rs"
doc = false

[dependencies]
attic = { path = "../attic", default-features = false }

anyhow = "1.0.68"
async-trait = "0.1.60"
aws-sdk-s3 = "0.22.0"
axum = "0.6.1"
axum-macros = "0.3.0"
base64 = "0.20.0"
bytes = "1.3.0"
chrono = "0.4.23"
clap = { version = "4.0", features = ["derive"] }
derivative = "2.2.0"
digest = "0.10.6"
displaydoc = "0.2.3"
enum-as-inner = "0.5.1"
futures = "0.3.25"
hex = "0.4.3"
humantime = "2.1.0"
humantime-serde = "1.1.1"
itoa = "1.0.5"
jsonwebtoken = "8.2.0"
lazy_static = "1.4.0"
maybe-owned = "0.3.4"
rand = "0.8.5"
regex = "1.7.0"
ryu = "1.0.12"
sha2 = { version = "0.10.6", features = ["asm"] }
serde = "1.0.151"
serde_json = "1.0.91"
serde_with = "2.1.0"
tokio-util = { version = "0.7.4", features = [ "io" ] }
toml = "0.5.10"
tower-http = { version = "0.3.5", features = [ "catch-panic" ] }
tracing = "0.1.37"
tracing-subscriber = "0.3.16"
uuid = { version = "1.2.2", features = ["v4"] }
console-subscriber = { version = "0.1.8", optional = true }
xdg = "2.4.1"

[dependencies.async-compression]
version = "0.3.15"
features = [
    "tokio",
    "xz",
    "zstd",
    "brotli",
]

[dependencies.sea-orm]
version = "0.10.6"
features = [
    "runtime-tokio-rustls",
    "macros",
    "sqlx-postgres",
    "sqlx-sqlite",
    "debug-print",
]

[dependencies.sea-orm-migration]
version = "0.10.6"

[dependencies.tokio]
version = "1.23.0"
features = [
    "fs",
    "io-util",
    "macros",
    "process",
    "rt",
    "rt-multi-thread",
    "sync",
]

[features]
tokio-console = ["dep:console-subscriber"]
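The optional `console-subscriber` dependency is gated behind the `tokio-console` feature declared at the bottom; it stays out of the build unless the server is compiled with that feature enabled (e.g. `cargo build -p attic-server --features tokio-console`).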
168	server/src/access/http.rs	Normal file
@@ -0,0 +1,168 @@
//! HTTP middlewares for access control.

use std::str;

use axum::{http::Request, middleware::Next, response::Response};
use lazy_static::lazy_static;
use regex::Regex;
use sea_orm::DatabaseConnection;
use tokio::sync::OnceCell;

use crate::access::{CachePermission, Token};
use crate::database::{entity::cache::CacheModel, AtticDatabase};
use crate::error::ServerResult;
use crate::{RequestState, State};
use attic::cache::CacheName;

lazy_static! {
    static ref AUTHORIZATION_REGEX: Regex =
        Regex::new(r"^(?i)((?P<bearer>bearer)|(?P<basic>basic))(?-i) (?P<rest>(.*))$").unwrap();
}

/// Auth state.
#[derive(Debug)]
pub struct AuthState {
    /// The JWT token.
    pub token: OnceCell<Token>,
}

impl AuthState {
    /// Returns an auth state with no authenticated user and no permissions.
    pub fn new() -> Self {
        Self {
            token: OnceCell::new(),
        }
    }

    /// Finds and performs authorization for a cache.
    pub async fn auth_cache<F, T>(
        &self,
        database: &DatabaseConnection,
        cache_name: &CacheName,
        f: F,
    ) -> ServerResult<T>
    where
        F: FnOnce(CacheModel, &mut CachePermission) -> ServerResult<T>,
    {
        let mut permission = if let Some(token) = self.token.get() {
            token.get_permission_for_cache(cache_name)
        } else {
            CachePermission::default()
        };

        let cache = match database.find_cache(cache_name).await {
            Ok(d) => {
                if d.is_public {
                    permission.add_public_permissions();
                }

                d
            }
            Err(e) => {
                if permission.can_discover() {
                    return Err(e);
                } else {
                    return Err(e.into_no_discovery_permissions());
                }
            }
        };

        match f(cache, &mut permission) {
            Ok(t) => Ok(t),
            Err(e) => {
                if permission.can_discover() {
                    Err(e)
                } else {
                    Err(e.into_no_discovery_permissions())
                }
            }
        }
    }

    /// Returns permission granted for a cache.
    pub fn get_permission_for_cache(
        &self,
        cache: &CacheName,
        grant_public_permissions: bool,
    ) -> CachePermission {
        let mut permission = if let Some(token) = self.token.get() {
            token.get_permission_for_cache(cache)
        } else {
            CachePermission::default()
        };

        if grant_public_permissions {
            permission.add_public_permissions();
        }

        permission
    }
}

/// Performs auth.
pub async fn apply_auth<B>(req: Request<B>, next: Next<B>) -> Response {
    let token: Option<Token> = req
        .headers()
        .get("Authorization")
        .and_then(|bytes| bytes.to_str().ok())
        .and_then(parse_authorization_header)
        .and_then(|jwt| {
            let state = req.extensions().get::<State>().unwrap();
            let res_token = Token::from_jwt(&jwt, &state.config.token_hs256_secret.decoding);
            if let Err(e) = &res_token {
                tracing::debug!("Ignoring bad JWT token: {}", e);
            }
            res_token.ok()
        });

    if let Some(token) = token {
        let req_state = req.extensions().get::<RequestState>().unwrap();
        req_state.auth.token.set(token).unwrap();
        tracing::trace!("Added valid token");
    }

    next.run(req).await
}

/// Extracts the JWT from an Authorization header.
fn parse_authorization_header(authorization: &str) -> Option<String> {
    let captures = AUTHORIZATION_REGEX.captures(authorization)?;
    let rest = captures.name("rest").unwrap().as_str();

    if captures.name("bearer").is_some() {
        // Bearer token
        Some(rest.to_string())
    } else {
        // Basic auth
        let bytes = base64::decode(rest).ok()?;

        let user_pass = str::from_utf8(&bytes).ok()?;
        let colon = user_pass.find(':')?;
        let pass = &user_pass[colon + 1..];

        Some(pass.to_string())
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_parse_authorization_header() {
        assert_eq!(
            "somepass",
            parse_authorization_header("Basic c29tZXVzZXI6c29tZXBhc3M=").unwrap(),
        );

        assert_eq!(
            "somepass",
            parse_authorization_header("baSIC c29tZXVzZXI6c29tZXBhc3M=").unwrap(),
        );

        assert_eq!(
            "some-token",
            parse_authorization_header("bearer some-token").unwrap(),
        );
    }
}
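A minimal sketch of how `apply_auth` is presumably mounted (the `build_router` function and the `/health` route here are hypothetical; the real wiring lives elsewhere in the server and must also install the `State` and `RequestState` extensions that the middleware reads):

use axum::{middleware, routing::get, Router};

// Hypothetical assembly. `apply_auth` runs before every handler, parses
// the Authorization header, and stashes a validated `Token` into the
// request's `AuthState`; handlers then enforce permissions through
// `auth_cache` or `get_permission_for_cache`.
fn build_router() -> Router {
    Router::new()
        .route("/health", get(|| async { "ok" }))
        .layer(middleware::from_fn(apply_auth))
}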
347	server/src/access/mod.rs	Normal file
@@ -0,0 +1,347 @@
//! Access control.
//!
//! Access control in Attic is simple and stateless [0] - The server validates
//! the JWT against a trusted public key and allows access based on the
//! `x-attic-access` claim.
//!
//! One primary goal of the Attic Server is easy scalability. It's designed
//! to be deployed to serverless platforms like AWS Lambda and have fast
//! cold-start times. Instances are created and destroyed rapidly in response
//! to requests.
//!
//! [0] We may revisit this later :)
//!
//! ## Cache discovery
//!
//! If the JWT grants any permission at all to the requested cache name,
//! then the bearer is able to discover the presence of the cache, meaning
//! that NoSuchCache or Forbidden can be returned depending on the scenario.
//! Otherwise, the user will get a generic 401 response (Unauthorized)
//! regardless of the request (or whether the cache exists or not).
//!
//! ## Supplying the token
//!
//! The JWT can be supplied to the server in one of two ways:
//!
//! - As a normal Bearer token.
//! - As the password in Basic Auth (used by Nix). The username is ignored.
//!
//! To add the token to Nix, use the following format in `~/.config/nix/netrc`:
//!
//! ```text
//! machine attic.server.tld password eyJhb...
//! ```
//!
//! ## Example token
//!
//! ```json
//! {
//!   "sub": "meow",
//!   "exp": 4102324986,
//!   "https://jwt.attic.rs/v1": {
//!     "caches": {
//!       "cache-rw": {
//!         "w": 1,
//!         "r": 1
//!       },
//!       "cache-ro": {
//!         "r": 1
//!       },
//!       "team-*": {
//!         "w": 1,
//!         "r": 1,
//!         "cc": 1
//!       }
//!     }
//!   }
//! }
//! ```

pub mod http;

#[cfg(test)]
mod tests;

use std::collections::HashMap;

use chrono::{DateTime, Utc};
use displaydoc::Display;
pub use jsonwebtoken::{
    Algorithm as JwtAlgorithm, DecodingKey as JwtDecodingKey, EncodingKey as JwtEncodingKey,
    Header as JwtHeader, Validation as JwtValidation,
};
use serde::{Deserialize, Serialize};
use serde_with::{serde_as, BoolFromInt};

use crate::error::ServerResult;
use attic::cache::{CacheName, CacheNamePattern};

/// Custom claim namespace for the AtticAccess information.
///
/// Custom claim namespaces are required by platforms like Auth0, and
/// custom claims without one will be silently dropped.
///
/// <https://auth0.com/docs/security/tokens/json-web-tokens/create-namespaced-custom-claims>
///
/// Also change the `#[serde(rename)]` below if you change this.
pub const CLAIM_NAMESPACE: &str = "https://jwt.attic.rs/v1";

macro_rules! require_permission_function {
    ($name:ident, $descr:literal, $member:ident) => {
        pub fn $name(&self) -> ServerResult<()> {
            if !self.$member {
                tracing::debug!("Client has no {} permission", $descr);
                if self.can_discover() {
                    Err(Error::PermissionDenied.into())
                } else {
                    Err(Error::NoDiscoveryPermission.into())
                }
            } else {
                Ok(())
            }
        }
    };
}

/// A validated JSON Web Token.
#[derive(Debug)]
pub struct Token(jsonwebtoken::TokenData<TokenClaims>);

/// Claims of a JSON Web Token.
#[derive(Debug, Serialize, Deserialize)]
struct TokenClaims {
    /// Subject.
    sub: String,

    /// Expiration timestamp.
    exp: usize,

    /// Attic namespace.
    #[serde(rename = "https://jwt.attic.rs/v1")]
    attic_ns: AtticAccess,
}

/// Permissions granted to a client.
///
/// This is the content of the `attic-access` claim in JWTs.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct AtticAccess {
    /// Cache permissions.
    ///
    /// Keys here may include wildcards.
    caches: HashMap<CacheNamePattern, CachePermission>,
}

/// Permission to a single cache.
#[serde_as]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CachePermission {
    /// Can pull objects from the cache.
    #[serde(default = "CachePermission::permission_default")]
    #[serde(skip_serializing_if = "is_false")]
    #[serde(rename = "r")]
    #[serde_as(as = "BoolFromInt")]
    pub pull: bool,

    /// Can push objects to the cache.
    #[serde(default = "CachePermission::permission_default")]
    #[serde(skip_serializing_if = "is_false")]
    #[serde(rename = "w")]
    #[serde_as(as = "BoolFromInt")]
    pub push: bool,

    /// Can delete objects from the cache.
    #[serde(default = "CachePermission::permission_default")]
    #[serde(skip_serializing_if = "is_false")]
    #[serde(rename = "d")]
    #[serde_as(as = "BoolFromInt")]
    pub delete: bool,

    /// Can create the cache itself.
    #[serde(default = "CachePermission::permission_default")]
    #[serde(skip_serializing_if = "is_false")]
    #[serde(rename = "cc")]
    #[serde_as(as = "BoolFromInt")]
    pub create_cache: bool,

    /// Can reconfigure the cache.
    #[serde(default = "CachePermission::permission_default")]
    #[serde(skip_serializing_if = "is_false")]
    #[serde(rename = "cr")]
    #[serde_as(as = "BoolFromInt")]
    pub configure_cache: bool,

    /// Can configure retention/quota settings.
    #[serde(default = "CachePermission::permission_default")]
    #[serde(skip_serializing_if = "is_false")]
    #[serde(rename = "cq")]
    #[serde_as(as = "BoolFromInt")]
    pub configure_cache_retention: bool,

    /// Can destroy the cache itself.
    #[serde(default = "CachePermission::permission_default")]
    #[serde(skip_serializing_if = "is_false")]
    #[serde(rename = "cd")]
    #[serde_as(as = "BoolFromInt")]
    pub destroy_cache: bool,
}

/// An access error.
#[derive(Debug, Display)]
#[ignore_extra_doc_attributes]
pub enum Error {
    /// User has no permission to this cache.
    NoDiscoveryPermission,

    /// User does not have permission to complete this action.
    ///
    /// This implies that there is some permission granted to the
    /// user, so the user is authorized to discover the cache.
    PermissionDenied,

    /// JWT error: {0}
    TokenError(jsonwebtoken::errors::Error),
}

impl Token {
    /// Verifies and decodes a token.
    pub fn from_jwt(token: &str, key: &JwtDecodingKey) -> ServerResult<Self> {
        let validation = JwtValidation::default();
        jsonwebtoken::decode::<TokenClaims>(token, key, &validation)
            .map_err(|e| Error::TokenError(e).into())
            .map(Token)
    }

    /// Creates a new token with an expiration timestamp.
    pub fn new(sub: String, exp: &DateTime<Utc>) -> Self {
        let claims = TokenClaims {
            sub,
            exp: exp.timestamp() as usize,
            attic_ns: Default::default(),
        };

        Self(jsonwebtoken::TokenData {
            header: JwtHeader::new(JwtAlgorithm::HS256),
            claims,
        })
    }

    /// Encodes the token.
    pub fn encode(&self, key: &JwtEncodingKey) -> ServerResult<String> {
        jsonwebtoken::encode(&self.0.header, &self.0.claims, key)
            .map_err(|e| Error::TokenError(e).into())
    }

    /// Returns the claims as a serializable value.
    pub fn opaque_claims(&self) -> &impl Serialize {
        &self.0.claims
    }

    /// Returns a mutable reference to a permission entry.
    pub fn get_or_insert_permission_mut(
        &mut self,
        pattern: CacheNamePattern,
    ) -> &mut CachePermission {
        use std::collections::hash_map::Entry;

        let access = self.attic_access_mut();
        match access.caches.entry(pattern) {
            Entry::Occupied(v) => v.into_mut(),
            Entry::Vacant(v) => v.insert(CachePermission::default()),
        }
    }

    /// Returns explicit permission granted for a cache.
    pub fn get_permission_for_cache(&self, cache: &CacheName) -> CachePermission {
        let access = self.attic_access();

        let pattern_key = cache.to_pattern();
        if let Some(direct_match) = access.caches.get(&pattern_key) {
            return direct_match.clone();
        }

        for (pattern, permission) in access.caches.iter() {
            if pattern.matches(cache) {
                return permission.clone();
            }
        }

        CachePermission::default()
    }

    fn attic_access(&self) -> &AtticAccess {
        &self.0.claims.attic_ns
    }

    fn attic_access_mut(&mut self) -> &mut AtticAccess {
        &mut self.0.claims.attic_ns
    }
}

impl CachePermission {
    /// Adds implicit grants for public caches.
    pub fn add_public_permissions(&mut self) {
        self.pull = true;
    }

    /// Returns whether the user is allowed to discover this cache.
    ///
    /// This permission is implied when any permission is explicitly
    /// granted.
    pub const fn can_discover(&self) -> bool {
        self.push
            || self.pull
            || self.delete
            || self.create_cache
            || self.configure_cache
            || self.destroy_cache
            || self.configure_cache_retention
    }

    pub fn require_discover(&self) -> ServerResult<()> {
        if !self.can_discover() {
            Err(Error::NoDiscoveryPermission.into())
        } else {
            Ok(())
        }
    }

    require_permission_function!(require_pull, "pull", pull);
    require_permission_function!(require_push, "push", push);
    require_permission_function!(require_delete, "delete", delete);
    require_permission_function!(require_create_cache, "create cache", create_cache);
    require_permission_function!(
        require_configure_cache,
        "reconfigure cache",
        configure_cache
    );
    require_permission_function!(
        require_configure_cache_retention,
        "configure cache retention",
        configure_cache_retention
    );
    require_permission_function!(require_destroy_cache, "destroy cache", destroy_cache);

    fn permission_default() -> bool {
        false
    }
}

impl Default for CachePermission {
    fn default() -> Self {
        Self {
            pull: false,
            push: false,
            delete: false,
            create_cache: false,
            configure_cache: false,
            configure_cache_retention: false,
            destroy_cache: false,
        }
    }
}

// bruh
fn is_false(b: &bool) -> bool {
    !b
}
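Tying the API in this module together, here is a minimal sketch of minting and verifying a token end to end. The secret, subject, validity, and pattern are placeholders, and it assumes `CacheNamePattern` implements `FromStr` (as its use as a clap argument in `atticadm` suggests):

use chrono::{Duration, Utc};

fn mint_and_verify() -> ServerResult<()> {
    // Placeholder secret ("very secure secret", base64-encoded).
    let enc_key = JwtEncodingKey::from_base64_secret("dmVyeSBzZWN1cmUgc2VjcmV0").unwrap();
    let dec_key = JwtDecodingKey::from_base64_secret("dmVyeSBzZWN1cmUgc2VjcmV0").unwrap();

    // Token for "alice", valid for 30 days, read-write on `dev-*`.
    let mut token = Token::new("alice".to_string(), &(Utc::now() + Duration::days(30)));
    let pattern: CacheNamePattern = "dev-*".parse().unwrap();
    let perm = token.get_or_insert_permission_mut(pattern);
    perm.pull = true;
    perm.push = true;

    // Round-trip: encode, decode, then check the effective permission.
    let jwt = token.encode(&enc_key)?;
    let decoded = Token::from_jwt(&jwt, &dec_key)?;
    let effective =
        decoded.get_permission_for_cache(&CacheName::new("dev-foo".to_string()).unwrap());
    assert!(effective.pull && effective.push);
    effective.require_push()
}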
76	server/src/access/tests.rs	Normal file
@@ -0,0 +1,76 @@
use super::*;

use attic::cache::CacheName;

macro_rules! cache {
    ($n:expr) => {
        CacheName::new($n.to_string()).unwrap()
    };
}

#[test]
fn test_basic() {
    // "very secure secret"
    let base64_secret = "dmVyeSBzZWN1cmUgc2VjcmV0";

    let dec_key =
        JwtDecodingKey::from_base64_secret(base64_secret).expect("Could not import decoding key");

    /*
    {
      "sub": "meow",
      "exp": 4102324986,
      "https://jwt.attic.rs/v1": {
        "caches": {
          "cache-rw": {"r":1,"w":1},
          "cache-ro": {"r":1},
          "team-*": {"r":1,"w":1,"cc":1}
        }
      }
    }
    */
    let token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJtZW93IiwiZXhwIjo0MTAyMzI0OTg2LCJodHRwczovL2p3dC5hdHRpYy5ycy92MSI6eyJjYWNoZXMiOnsiY2FjaGUtcnciOnsiciI6MSwidyI6MX0sImNhY2hlLXJvIjp7InIiOjF9LCJ0ZWFtLSoiOnsiciI6MSwidyI6MSwiY2MiOjF9fX19.UlsIM9bQHr9SXGAcSQcoVPo9No8Zhh6Y5xfX8vCmKmA";

    let decoded = Token::from_jwt(token, &dec_key).unwrap();

    let perm_rw = decoded.get_permission_for_cache(&cache! { "cache-rw" });

    assert!(perm_rw.pull);
    assert!(perm_rw.push);
    assert!(!perm_rw.delete);
    assert!(!perm_rw.create_cache);

    assert!(perm_rw.require_pull().is_ok());
    assert!(perm_rw.require_push().is_ok());
    assert!(perm_rw.require_delete().is_err());
    assert!(perm_rw.require_create_cache().is_err());

    let perm_ro = decoded.get_permission_for_cache(&cache! { "cache-ro" });

    assert!(perm_ro.pull);
    assert!(!perm_ro.push);
    assert!(!perm_ro.delete);
    assert!(!perm_ro.create_cache);

    assert!(perm_ro.require_pull().is_ok());
    assert!(perm_ro.require_push().is_err());
    assert!(perm_ro.require_delete().is_err());
    assert!(perm_ro.require_create_cache().is_err());

    let perm_team = decoded.get_permission_for_cache(&cache! { "team-xyz" });

    assert!(perm_team.pull);
    assert!(perm_team.push);
    assert!(!perm_team.delete);
    assert!(perm_team.create_cache);

    assert!(perm_team.require_pull().is_ok());
    assert!(perm_team.require_push().is_ok());
    assert!(perm_team.require_delete().is_err());
    assert!(perm_team.require_create_cache().is_ok());

    assert!(!decoded
        .get_permission_for_cache(&cache! { "forbidden-cache" })
        .can_discover());
}
123	server/src/adm/command/make_token.rs	Normal file
@@ -0,0 +1,123 @@
use anyhow::{anyhow, Result};
use chrono::{Duration as ChronoDuration, Utc};
use clap::Parser;
use humantime::Duration;

use crate::Opts;
use attic::cache::CacheNamePattern;
use attic_server::access::Token;
use attic_server::config::Config;

/// Generate a new token.
///
/// For example, to generate a token for Alice with read-write access
/// to any cache starting with `dev-` and read-only access to `prod`,
/// expiring in 2 years:
///
/// $ atticadm make-token --sub "alice" --validity "2y" --pull "dev-*" --push "dev-*" --pull "prod"
#[derive(Debug, Parser)]
pub struct MakeToken {
    /// The subject of the JWT token.
    #[clap(long)]
    sub: String,

    /// The validity period of the JWT token.
    ///
    /// You can use expressions like "2 years", "3 months"
    /// and "1y".
    #[clap(long)]
    validity: Duration,

    /// Dump the claims without signing and encoding them.
    #[clap(long)]
    dump_claims: bool,

    /// A cache that the token may pull from.
    ///
    /// The value may contain wildcards. Specify this flag multiple
    /// times to allow multiple patterns.
    #[clap(long = "pull", value_name = "PATTERN")]
    pull_patterns: Vec<CacheNamePattern>,

    /// A cache that the token may push to.
    ///
    /// The value may contain wildcards. Specify this flag multiple
    /// times to allow multiple patterns.
    #[clap(long = "push", value_name = "PATTERN")]
    push_patterns: Vec<CacheNamePattern>,

    /// A cache that the token may delete store paths from.
    ///
    /// The value may contain wildcards. Specify this flag multiple
    /// times to allow multiple patterns.
    #[clap(long = "delete", value_name = "PATTERN")]
    delete_patterns: Vec<CacheNamePattern>,

    /// A cache that the token may create.
    ///
    /// The value may contain wildcards. Specify this flag multiple
    /// times to allow multiple patterns.
    #[clap(long = "create-cache", value_name = "PATTERN")]
    create_cache_patterns: Vec<CacheNamePattern>,

    /// A cache that the token may configure.
    ///
    /// The value may contain wildcards. Specify this flag multiple
    /// times to allow multiple patterns.
    #[clap(long = "configure-cache", value_name = "PATTERN")]
    configure_cache_patterns: Vec<CacheNamePattern>,

    /// A cache that the token may configure retention/quota for.
    ///
    /// The value may contain wildcards. Specify this flag multiple
    /// times to allow multiple patterns.
    #[clap(long = "configure-cache-retention", value_name = "PATTERN")]
    configure_cache_retention_patterns: Vec<CacheNamePattern>,

    /// A cache that the token may destroy.
    ///
    /// The value may contain wildcards. Specify this flag multiple
    /// times to allow multiple patterns.
    #[clap(long = "destroy-cache", value_name = "PATTERN")]
    destroy_cache_patterns: Vec<CacheNamePattern>,
}

macro_rules! grant_permissions {
    ($token:ident, $list:expr, $perm:ident) => {
        for pattern in $list {
            let mut perm = $token.get_or_insert_permission_mut(pattern.to_owned());
            perm.$perm = true;
        }
    };
}

pub async fn run(config: Config, opts: Opts) -> Result<()> {
    let sub = opts.command.as_make_token().unwrap();
    let duration = ChronoDuration::from_std(sub.validity.into())?;
    let exp = Utc::now()
        .checked_add_signed(duration)
        .ok_or_else(|| anyhow!("Expiry timestamp overflowed"))?;

    let mut token = Token::new(sub.sub.to_owned(), &exp);

    grant_permissions!(token, &sub.pull_patterns, pull);
    grant_permissions!(token, &sub.push_patterns, push);
    grant_permissions!(token, &sub.delete_patterns, delete);
    grant_permissions!(token, &sub.create_cache_patterns, create_cache);
    grant_permissions!(token, &sub.configure_cache_patterns, configure_cache);
    grant_permissions!(
        token,
        &sub.configure_cache_retention_patterns,
        configure_cache_retention
    );
    grant_permissions!(token, &sub.destroy_cache_patterns, destroy_cache);

    if sub.dump_claims {
        println!("{}", serde_json::to_string(token.opaque_claims())?);
    } else {
        let encoded_token = token.encode(&config.token_hs256_secret.encoding)?;
        println!("{}", encoded_token);
    }

    Ok(())
}
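For clarity, each `grant_permissions!` invocation expands to a plain loop over the given patterns; for example, `grant_permissions!(token, &sub.pull_patterns, pull)` becomes roughly:

// Expansion of the macro for the `pull` permission.
for pattern in &sub.pull_patterns {
    let mut perm = token.get_or_insert_permission_mut(pattern.to_owned());
    perm.pull = true;
}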
1	server/src/adm/command/mod.rs	Normal file
@@ -0,0 +1 @@
pub mod make_token;
48	server/src/adm/main.rs	Normal file
@@ -0,0 +1,48 @@
mod command;

use std::env;
use std::path::PathBuf;

use anyhow::Result;
use clap::{Parser, Subcommand};
use enum_as_inner::EnumAsInner;

use attic_server::config;
use command::make_token::{self, MakeToken};

/// Attic server administration utilities.
#[derive(Debug, Parser)]
#[clap(version, author = "Zhaofeng Li <hello@zhaofeng.li>")]
#[clap(propagate_version = true)]
pub struct Opts {
    /// Path to the config file.
    #[clap(short = 'f', long)]
    config: Option<PathBuf>,

    /// The sub-command.
    #[clap(subcommand)]
    pub command: Command,
}

#[derive(Debug, Subcommand, EnumAsInner)]
pub enum Command {
    MakeToken(MakeToken),
}

#[tokio::main]
async fn main() -> Result<()> {
    let opts = Opts::parse();
    let config = if let Some(config_path) = &opts.config {
        config::load_config_from_path(config_path)
    } else if let Ok(config_env) = env::var("ATTIC_SERVER_CONFIG") {
        config::load_config_from_str(&config_env)
    } else {
        config::load_config_from_path(&config::get_xdg_config_path()?)
    };

    match opts.command {
        Command::MakeToken(_) => make_token::run(config, opts).await?,
    }

    Ok(())
}
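Note the configuration resolution order above: an explicit `-f` path wins, then the `ATTIC_SERVER_CONFIG` environment variable, whose value is the configuration text itself (it is fed to `load_config_from_str`, not treated as a path), and finally the XDG config location.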
211	server/src/api/binary_cache.rs	Normal file
@@ -0,0 +1,211 @@
//! Nix Binary Cache server.
//!
//! This module implements the Nix Binary Cache API.
//!
//! The implementation is based on the specifications at <https://github.com/fzakaria/nix-http-binary-cache-api-spec>.

use std::path::PathBuf;

use axum::{
    body::StreamBody,
    extract::{Extension, Path},
    http::StatusCode,
    response::{IntoResponse, Redirect, Response},
    routing::get,
    Router,
};
use serde::Serialize;
use tokio_util::io::ReaderStream;
use tracing::instrument;

use crate::database::AtticDatabase;
use crate::error::{ServerError, ServerResult};
use crate::narinfo::NarInfo;
use crate::nix_manifest;
use crate::storage::Download;
use crate::{RequestState, State};
use attic::cache::CacheName;
use attic::mime;
use attic::nix_store::StorePathHash;

/// Nix cache information.
///
/// An example of a correct response is as follows:
///
/// ```text
/// StoreDir: /nix/store
/// WantMassQuery: 1
/// Priority: 40
/// ```
#[derive(Debug, Clone, Serialize)]
struct NixCacheInfo {
    /// Whether this binary cache supports bulk queries.
    #[serde(rename = "WantMassQuery")]
    want_mass_query: bool,

    /// The Nix store path this binary cache uses.
    #[serde(rename = "StoreDir")]
    store_dir: PathBuf,

    /// The priority of the binary cache.
    ///
    /// A lower number denotes a higher priority.
    /// <https://cache.nixos.org> has a priority of 40.
    #[serde(rename = "Priority")]
    priority: i32,
}

impl IntoResponse for NixCacheInfo {
    fn into_response(self) -> Response {
        match nix_manifest::to_string(&self) {
            Ok(body) => Response::builder()
                .status(StatusCode::OK)
                .header("Content-Type", mime::NIX_CACHE_INFO)
                .body(body)
                .unwrap()
                .into_response(),
            Err(e) => e.into_response(),
        }
    }
}

/// Gets information on a cache.
#[instrument(skip_all, fields(cache_name))]
async fn get_nix_cache_info(
    Extension(state): Extension<State>,
    Extension(req_state): Extension<RequestState>,
    Path(cache_name): Path<CacheName>,
) -> ServerResult<NixCacheInfo> {
    let database = state.database().await?;
    let cache = req_state
        .auth
        .auth_cache(database, &cache_name, |cache, permission| {
            permission.require_pull()?;
            Ok(cache)
        })
        .await?;

    let info = NixCacheInfo {
        want_mass_query: true,
        store_dir: cache.store_dir.into(),
        priority: cache.priority,
    };

    Ok(info)
}

/// Gets various information on a store path hash.
///
/// `/:cache/:path`, which may be one of
/// - GET `/:cache/{storePathHash}.narinfo`
/// - HEAD `/:cache/{storePathHash}.narinfo`
/// - GET `/:cache/{storePathHash}.ls` (not implemented)
#[instrument(skip_all, fields(cache_name, path))]
async fn get_store_path_info(
    Extension(state): Extension<State>,
    Extension(req_state): Extension<RequestState>,
    Path((cache_name, path)): Path<(CacheName, String)>,
) -> ServerResult<NarInfo> {
    let components: Vec<&str> = path.splitn(2, '.').collect();

    if components.len() != 2 {
        return Err(ServerError::NotFound);
    }

    // TODO: Other endpoints
    if components[1] != "narinfo" {
        return Err(ServerError::NotFound);
    }

    let store_path_hash = StorePathHash::new(components[0].to_string())?;

    tracing::debug!(
        "Received request for {}.narinfo in {:?}",
        store_path_hash.as_str(),
        cache_name
    );

    let (object, cache, nar) = state
        .database()
        .await?
        .find_object_by_store_path_hash(&cache_name, &store_path_hash)
        .await?;

    let permission = req_state
        .auth
        .get_permission_for_cache(&cache_name, cache.is_public);
    permission.require_pull()?;

    let mut narinfo = object.to_nar_info(&nar)?;

    if narinfo.signature().is_none() {
        let keypair = cache.keypair()?;
        narinfo.sign(&keypair);
    }

    Ok(narinfo)
}

/// Gets a NAR.
///
/// - GET `:cache/nar/{storePathHash}.nar`
///
/// Here we use the store path hash, not the NAR hash or file hash,
/// for better logging. In reality, the files are deduplicated by
/// content-addressing.
#[instrument(skip_all, fields(cache_name, path))]
async fn get_nar(
    Extension(state): Extension<State>,
    Extension(req_state): Extension<RequestState>,
    Path((cache_name, path)): Path<(CacheName, String)>,
) -> ServerResult<Response> {
    let components: Vec<&str> = path.splitn(2, '.').collect();

    if components.len() != 2 {
        return Err(ServerError::NotFound);
    }

    if components[1] != "nar" {
        return Err(ServerError::NotFound);
    }

    let store_path_hash = StorePathHash::new(components[0].to_string())?;

    tracing::debug!(
        "Received request for {}.nar in {:?}",
        store_path_hash.as_str(),
        cache_name
    );

    let database = state.database().await?;

    let (object, cache, nar) = database
        .find_object_by_store_path_hash(&cache_name, &store_path_hash)
        .await?;

    let permission = req_state
        .auth
        .get_permission_for_cache(&cache_name, cache.is_public);
    permission.require_pull()?;

    database.bump_object_last_accessed(object.id).await?;

    let remote_file = nar.remote_file.0;
    let backend = state.storage().await?;
    match backend.download_file_db(&remote_file).await? {
        Download::Redirect(uri) => Ok(Redirect::temporary(&uri).into_response()),
        Download::Stream(stream) => {
            let stream = ReaderStream::new(stream);
            let body = StreamBody::new(stream);

            Ok(body.into_response())
        }
    }
}

pub fn get_router() -> Router {
    Router::new()
        .route("/:cache/nix-cache-info", get(get_nix_cache_info))
        .route("/:cache/:path", get(get_store_path_info))
        .route("/:cache/nar/:path", get(get_nar))
}
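Both `.narinfo` and `.nar` handlers funnel their `:path` segment through the same two-way split; a hedged sketch of that parsing as a standalone helper (the helper name is illustrative, the handlers above inline this logic):

// Illustrative only: mirrors `path.splitn(2, '.')` in the handlers above.
// e.g. split_store_path("abc123.narinfo") == Some(("abc123", "narinfo"))
fn split_store_path(path: &str) -> Option<(&str, &str)> {
    let components: Vec<&str> = path.splitn(2, '.').collect();
    if components.len() != 2 {
        return None; // rejected with `ServerError::NotFound` in the handlers
    }
    Some((components[0], components[1]))
}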
Some files were not shown because too many files have changed in this diff.