From 5aa1623dc2e58914a80e763d884fa337343ec3ac Mon Sep 17 00:00:00 2001
From: Zhaofeng Li
Date: Sat, 31 Dec 2022 17:01:07 -0700
Subject: [PATCH] Initial public commit

---
 .cargo/config | 3 +
 .editorconfig | 29 +
 .envrc | 5 +
 .github/workflows/book.yml | 46 +
 .github/workflows/build.yml | 27 +
 .gitignore | 6 +
 Cargo.lock | 4008 +++++++++++++++++
 Cargo.toml | 11 +
 LICENSE | 13 +
 README.md | 35 +
 attic/Cargo.toml | 49 +
 attic/build.rs | 98 +
 attic/src/api/mod.rs | 1 +
 attic/src/api/v1/cache_config.rs | 136 +
 attic/src/api/v1/get_missing_paths.rs | 25 +
 attic/src/api/v1/mod.rs | 3 +
 attic/src/api/v1/upload_path.rs | 52 +
 attic/src/cache.rs | 266 ++
 attic/src/error.rs | 84 +
 attic/src/hash/mod.rs | 153 +
 attic/src/hash/tests/.gitattributes | 1 +
 attic/src/hash/tests/blob | 15 +
 attic/src/hash/tests/mod.rs | 62 +
 attic/src/lib.rs | 29 +
 attic/src/mime.rs | 10 +
 attic/src/nix_store/README.md | 14 +
 attic/src/nix_store/bindings/bindgen.hpp | 2 +
 attic/src/nix_store/bindings/bindgen.rs | 37 +
 attic/src/nix_store/bindings/mod.rs | 272 ++
 attic/src/nix_store/bindings/nix.cpp | 133 +
 attic/src/nix_store/bindings/nix.hpp | 77 +
 attic/src/nix_store/mod.rs | 298 ++
 attic/src/nix_store/nix_store.rs | 236 +
 attic/src/nix_store/tests/.gitattributes | 1 +
 attic/src/nix_store/tests/README.md | 14 +
 attic/src/nix_store/tests/drv/no-deps.nix | 7 +
 attic/src/nix_store/tests/drv/with-deps.nix | 21 +
 attic/src/nix_store/tests/mod.rs | 255 ++
 ...h0jan8ny2a712-attic-test-with-deps-c-final | 1 +
 ...y2a712-attic-test-with-deps-c-final.export | Bin 0 -> 344 bytes
 ...n8ny2a712-attic-test-with-deps-c-final.nar | Bin 0 -> 136 bytes
 ...3xi1bbml28f8jj6009p-attic-test-with-deps-b | 1 +
 ...l28f8jj6009p-attic-test-with-deps-b.export | Bin 0 -> 488 bytes
 ...bbml28f8jj6009p-attic-test-with-deps-b.nar | Bin 0 -> 208 bytes
 ...z8qdsxpm6jbhrnxraq2-attic-test-with-deps-a | 1 +
 ...m6jbhrnxraq2-attic-test-with-deps-a.export | Bin 0 -> 480 bytes
 ...sxpm6jbhrnxraq2-attic-test-with-deps-a.nar | Bin 0 -> 200 bytes
 ...j6icmhd2q3260hl1w9zj6li-attic-test-no-deps | 1 +
 ...2q3260hl1w9zj6li-attic-test-no-deps.export | Bin 0 -> 336 bytes
 ...mhd2q3260hl1w9zj6li-attic-test-no-deps.nar | Bin 0 -> 144 bytes
 attic/src/nix_store/tests/test_nar.rs | 245 +
 attic/src/signing/mod.rs | 271 ++
 attic/src/signing/tests.rs | 68 +
 attic/src/stream.rs | 110 +
 attic/src/testing/mod.rs | 3 +
 attic/src/testing/shadow_store/mod.rs | 117 +
 attic/src/testing/shadow_store/nix-wrapper.sh | 6 +
 attic/src/util.rs | 39 +
 book/.gitignore | 1 +
 book/book.toml | 10 +
 book/colorized-help.nix | 43 +
 book/default.nix | 40 +
 book/src/SUMMARY.md | 11 +
 book/src/admin-guide/README.md | 3 +
 book/src/faqs.md | 37 +
 book/src/introduction.md | 23 +
 book/src/reference/README.md | 7 +
 book/src/reference/attic-cli.md | 12 +
 book/src/reference/atticadm-cli.md | 11 +
 book/src/reference/atticd-cli.md | 11 +
 book/src/tutorial.md | 204 +
 book/src/user-guide/README.md | 40 +
 client/Cargo.toml | 39 +
 client/src/api/mod.rs | 219 +
 client/src/cache.rs | 77 +
 client/src/cli.rs | 69 +
 client/src/command/cache.rs | 330 ++
 client/src/command/get_closure.rs | 31 +
 client/src/command/login.rs | 48 +
 client/src/command/mod.rs | 5 +
 client/src/command/push.rs | 355 ++
 client/src/command/use.rs | 68 +
 client/src/config.rs | 182 +
 client/src/main.rs | 36 +
 client/src/nix_config.rs | 265 ++
 client/src/nix_netrc.rs | 248 +
 client/src/version.rs | 8 +
 default.nix | 8 +
 flake-compat.nix | 9 +
 flake.lock | 60 +
 flake.nix | 91 +
 package.nix | 63 +
 server/Cargo.toml | 96 +
 server/src/access/http.rs | 168 +
 server/src/access/mod.rs | 347 ++
 server/src/access/tests.rs | 76 +
 server/src/adm/command/make_token.rs | 123 +
 server/src/adm/command/mod.rs | 1 +
 server/src/adm/main.rs | 48 +
 server/src/api/binary_cache.rs | 211 +
 server/src/api/mod.rs | 17 +
 server/src/api/placeholder.html | 37 +
 server/src/api/v1/cache_config.rs | 231 +
 server/src/api/v1/get_missing_paths.rs | 69 +
 server/src/api/v1/mod.rs | 37 +
 server/src/api/v1/upload_path.rs | 380 ++
 server/src/config-template.toml | 105 +
 server/src/config.rs | 273 ++
 server/src/database/entity/cache.rs | 72 +
 server/src/database/entity/mod.rs | 56 +
 server/src/database/entity/nar.rs | 120 +
 server/src/database/entity/object.rs | 128 +
 .../m20221227_000001_create_cache_table.rs | 67 +
 .../m20221227_000002_create_nar_table.rs | 71 +
 .../m20221227_000003_create_object_table.rs | 82 +
 ...0221227_000004_add_object_last_accessed.rs | 31 +
 ...21227_000005_add_cache_retention_period.rs | 27 +
 server/src/database/migration/mod.rs | 24 +
 server/src/database/mod.rs | 194 +
 server/src/error.rs | 173 +
 server/src/gc.rs | 204 +
 server/src/lib.rs | 224 +
 server/src/main.rs | 117 +
 server/src/middleware.rs | 57 +
 server/src/narinfo/mod.rs | 283 ++
 server/src/narinfo/tests.rs | 128 +
 server/src/nix_manifest/deserializer.rs | 409 ++
 server/src/nix_manifest/mod.rs | 136 +
 server/src/nix_manifest/serializer.rs | 336 ++
 server/src/nix_manifest/tests.rs | 55 +
 server/src/oobe.rs | 103 +
 server/src/storage/local.rs | 116 +
 server/src/storage/mod.rs | 86 +
 server/src/storage/s3.rs | 374 ++
 shell.nix | 3 +
 135 files changed, 15956 insertions(+)
 create mode 100644 .cargo/config
 create mode 100644 .editorconfig
 create mode 100644 .envrc
 create mode 100644 .github/workflows/book.yml
 create mode 100644 .github/workflows/build.yml
 create mode 100644 .gitignore
 create mode 100644 Cargo.lock
 create mode 100644 Cargo.toml
 create mode 100644 LICENSE
 create mode 100644 README.md
 create mode 100644 attic/Cargo.toml
 create mode 100644 attic/build.rs
 create mode 100644 attic/src/api/mod.rs
 create mode 100644 attic/src/api/v1/cache_config.rs
 create mode 100644 attic/src/api/v1/get_missing_paths.rs
 create mode 100644 attic/src/api/v1/mod.rs
 create mode 100644 attic/src/api/v1/upload_path.rs
 create mode 100644 attic/src/cache.rs
 create mode 100644 attic/src/error.rs
 create mode 100644 attic/src/hash/mod.rs
 create mode 100644 attic/src/hash/tests/.gitattributes
 create mode 100644 attic/src/hash/tests/blob
 create mode 100644 attic/src/hash/tests/mod.rs
 create mode 100644 attic/src/lib.rs
 create mode 100644 attic/src/mime.rs
 create mode 100644 attic/src/nix_store/README.md
 create mode 100644 attic/src/nix_store/bindings/bindgen.hpp
 create mode 100644 attic/src/nix_store/bindings/bindgen.rs
 create mode 100644 attic/src/nix_store/bindings/mod.rs
 create mode 100644 attic/src/nix_store/bindings/nix.cpp
 create mode 100644 attic/src/nix_store/bindings/nix.hpp
 create mode 100644 attic/src/nix_store/mod.rs
 create mode 100644 attic/src/nix_store/nix_store.rs
 create mode 100644 attic/src/nix_store/tests/.gitattributes
 create mode 100644 attic/src/nix_store/tests/README.md
 create mode 100755 attic/src/nix_store/tests/drv/no-deps.nix
 create mode 100755 attic/src/nix_store/tests/drv/with-deps.nix
 create mode 100644 attic/src/nix_store/tests/mod.rs
 create mode 100644 attic/src/nix_store/tests/nar/3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final
 create mode 100644 attic/src/nix_store/tests/nar/3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final.export
 create mode 100644 attic/src/nix_store/tests/nar/3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final.nar
 create mode 100644 attic/src/nix_store/tests/nar/544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b
 create mode 100644 attic/src/nix_store/tests/nar/544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b.export
 create mode 100644 attic/src/nix_store/tests/nar/544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b.nar
 create mode 100644 attic/src/nix_store/tests/nar/n7q4i7rlmbk4xz8qdsxpm6jbhrnxraq2-attic-test-with-deps-a
 create mode 100644 attic/src/nix_store/tests/nar/n7q4i7rlmbk4xz8qdsxpm6jbhrnxraq2-attic-test-with-deps-a.export
 create mode 100644 attic/src/nix_store/tests/nar/n7q4i7rlmbk4xz8qdsxpm6jbhrnxraq2-attic-test-with-deps-a.nar
 create mode 100644 attic/src/nix_store/tests/nar/nm1w9sdm6j6icmhd2q3260hl1w9zj6li-attic-test-no-deps
 create mode 100644 attic/src/nix_store/tests/nar/nm1w9sdm6j6icmhd2q3260hl1w9zj6li-attic-test-no-deps.export
 create mode 100644 attic/src/nix_store/tests/nar/nm1w9sdm6j6icmhd2q3260hl1w9zj6li-attic-test-no-deps.nar
 create mode 100644 attic/src/nix_store/tests/test_nar.rs
 create mode 100644 attic/src/signing/mod.rs
 create mode 100644 attic/src/signing/tests.rs
 create mode 100644 attic/src/stream.rs
 create mode 100644 attic/src/testing/mod.rs
 create mode 100644 attic/src/testing/shadow_store/mod.rs
 create mode 100644 attic/src/testing/shadow_store/nix-wrapper.sh
 create mode 100644 attic/src/util.rs
 create mode 100644 book/.gitignore
 create mode 100644 book/book.toml
 create mode 100644 book/colorized-help.nix
 create mode 100644 book/default.nix
 create mode 100644 book/src/SUMMARY.md
 create mode 100644 book/src/admin-guide/README.md
 create mode 100644 book/src/faqs.md
 create mode 100644 book/src/introduction.md
 create mode 100644 book/src/reference/README.md
 create mode 100644 book/src/reference/attic-cli.md
 create mode 100644 book/src/reference/atticadm-cli.md
 create mode 100644 book/src/reference/atticd-cli.md
 create mode 100644 book/src/tutorial.md
 create mode 100644 book/src/user-guide/README.md
 create mode 100644 client/Cargo.toml
 create mode 100644 client/src/api/mod.rs
 create mode 100644 client/src/cache.rs
 create mode 100644 client/src/cli.rs
 create mode 100644 client/src/command/cache.rs
 create mode 100644 client/src/command/get_closure.rs
 create mode 100644 client/src/command/login.rs
 create mode 100644 client/src/command/mod.rs
 create mode 100644 client/src/command/push.rs
 create mode 100644 client/src/command/use.rs
 create mode 100644 client/src/config.rs
 create mode 100644 client/src/main.rs
 create mode 100644 client/src/nix_config.rs
 create mode 100644 client/src/nix_netrc.rs
 create mode 100644 client/src/version.rs
 create mode 100644 default.nix
 create mode 100644 flake-compat.nix
 create mode 100644 flake.lock
 create mode 100644 flake.nix
 create mode 100644 package.nix
 create mode 100644 server/Cargo.toml
 create mode 100644 server/src/access/http.rs
 create mode 100644 server/src/access/mod.rs
 create mode 100644 server/src/access/tests.rs
 create mode 100644 server/src/adm/command/make_token.rs
 create mode 100644 server/src/adm/command/mod.rs
 create mode 100644 server/src/adm/main.rs
 create mode 100644 server/src/api/binary_cache.rs
 create mode 100644 server/src/api/mod.rs
 create mode 100644 server/src/api/placeholder.html
 create mode 100644 server/src/api/v1/cache_config.rs
 create mode 100644 server/src/api/v1/get_missing_paths.rs
 create mode 100644 server/src/api/v1/mod.rs
 create mode 100644 server/src/api/v1/upload_path.rs
 create mode 100644 server/src/config-template.toml
 create mode 100644 server/src/config.rs
 create mode 100644 server/src/database/entity/cache.rs
 create mode 100644 server/src/database/entity/mod.rs
 create mode 100644 server/src/database/entity/nar.rs
 create mode 100644 server/src/database/entity/object.rs
 create mode 100644 server/src/database/migration/m20221227_000001_create_cache_table.rs
 create mode 100644 server/src/database/migration/m20221227_000002_create_nar_table.rs
 create mode 100644 server/src/database/migration/m20221227_000003_create_object_table.rs
 create mode 100644 server/src/database/migration/m20221227_000004_add_object_last_accessed.rs
 create mode 100644 server/src/database/migration/m20221227_000005_add_cache_retention_period.rs
 create mode 100644 server/src/database/migration/mod.rs
 create mode 100644 server/src/database/mod.rs
 create mode 100644 server/src/error.rs
 create mode 100644 server/src/gc.rs
 create mode 100644 server/src/lib.rs
 create mode 100644 server/src/main.rs
 create mode 100644 server/src/middleware.rs
 create mode 100644 server/src/narinfo/mod.rs
 create mode 100644 server/src/narinfo/tests.rs
 create mode 100644 server/src/nix_manifest/deserializer.rs
 create mode 100644 server/src/nix_manifest/mod.rs
 create mode 100644 server/src/nix_manifest/serializer.rs
 create mode 100644 server/src/nix_manifest/tests.rs
 create mode 100644 server/src/oobe.rs
 create mode 100644 server/src/storage/local.rs
 create mode 100644 server/src/storage/mod.rs
 create mode 100644 server/src/storage/s3.rs
 create mode 100644 shell.nix

diff --git a/.cargo/config b/.cargo/config
new file mode 100644
index 0000000..6e54197
--- /dev/null
+++ b/.cargo/config
@@ -0,0 +1,3 @@
+[build]
+rustflags = ["--cfg", "tokio_unstable"]
+rustdocflags = ["--cfg", "tokio_unstable"]
diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 0000000..1fd750b
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,29 @@
+# EditorConfig configuration for Attic
+
+# Top-most EditorConfig file
+root = true
+
+# Unix-style newlines with a newline ending every file, utf-8 charset
+[*]
+end_of_line = lf
+insert_final_newline = true
+trim_trailing_whitespace = true
+charset = utf-8
+
+# Rust
+[*.rs]
+indent_style = space
+indent_size = 2
+
+# Misc
+[*.{yaml,yml,md,nix}]
+indent_style = space
+indent_size = 2
+
+[attic/src/nix_store/tests/nar/**]
+charset = unset
+end_of_line = unset
+insert_final_newline = unset
+trim_trailing_whitespace = unset
+indent_style = unset
+indent_size = unset
diff --git a/.envrc b/.envrc
new file mode 100644
index 0000000..24bc68b
--- /dev/null
+++ b/.envrc
@@ -0,0 +1,5 @@
+if ! has nix_direnv_version || ! nix_direnv_version 2.2.0; then
+  source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/2.2.0/direnvrc" "sha256-5EwyKnkJNQeXrRkYbwwRBcXbibosCJqyIUuz9Xq+LRc="
+fi
+
+use_flake
diff --git a/.github/workflows/book.yml b/.github/workflows/book.yml
new file mode 100644
index 0000000..789cfcb
--- /dev/null
+++ b/.github/workflows/book.yml
@@ -0,0 +1,46 @@
+name: Deploy Book
+
+on:
+  push:
+
+permissions:
+  contents: read
+  pages: write
+  id-token: write
+
+jobs:
+  deploy-unstable:
+    name: Deploy
+
+    runs-on: ubuntu-latest
+    if: github.repository == 'zhaofengli/attic'
+
+    steps:
+      - uses: actions/checkout@v3.0.2
+
+      - name: Obtain current username
+        run: |
+          echo ACTION_USER=$USER >> $GITHUB_ENV
+
+      - uses: DeterminateSystems/nix-installer@main
+        with:
+          extra-conf: |
+            trusted-users = root ${{ env.ACTION_USER }}
+            substituters = https://staging.attic.rs/attic-ci https://cache.nixos.org
+            trusted-public-keys = attic-ci:U5Sey4mUxwBXM3iFapmP0/ogODXywKLRNgRPQpEXxbo= cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=
+
+      # == Manual
+      - name: Build book
+        run: nix build .#book -L
+
+      - name: Copy book artifact
+        run: |
+          cp --recursive --dereference --no-preserve=mode,ownership result public
+
+      - name: Upload book artifact
+        uses: actions/upload-pages-artifact@v1.0.7
+        with:
+          path: public
+
+      - name: Deploy book
+        uses: actions/deploy-pages@v1.2.3
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
new file mode 100644
index 0000000..ad7d7f4
--- /dev/null
+++ b/.github/workflows/build.yml
@@ -0,0 +1,27 @@
+name: Build
+on:
+  pull_request:
+  push:
+jobs:
+  tests:
+    strategy:
+      matrix:
+        os:
+          - ubuntu-latest
+          - macos-11
+    runs-on: ${{ matrix.os }}
+    steps:
+      - uses: actions/checkout@v2.3.5
+
+      - name: Obtain current username
+        run: |
+          echo ACTION_USER=$USER >> $GITHUB_ENV
+
+      - uses: DeterminateSystems/nix-installer@main
+        with:
+          extra-conf: |
+            trusted-users = root ${{ env.ACTION_USER }}
+            substituters = https://staging.attic.rs/attic-ci https://cache.nixos.org
+            trusted-public-keys = attic-ci:U5Sey4mUxwBXM3iFapmP0/ogODXywKLRNgRPQpEXxbo= cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=
+
+      - run: nix develop --command -- cargo test
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..817deaf
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,6 @@
+.direnv
+
+/target
+result
+
+fly.toml
diff --git a/Cargo.lock b/Cargo.lock
new file mode 100644
index 0000000..63a918b
--- /dev/null
+++ b/Cargo.lock
@@ -0,0 +1,4008 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "Inflector"
+version = "0.11.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3"
+
+[[package]]
+name = "adler"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
+
+[[package]]
+name = "ahash"
+version = "0.7.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47"
+dependencies = [
+ "getrandom",
+ "once_cell",
+ "version_check",
+]
+
+[[package]]
+name = "aho-corasick"
+version = "0.7.20"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "aliasable"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "250f629c0161ad8107cf89319e990051fae62832fd343083bea452d93e2205fd"
+
+[[package]]
+name = "alloc-no-stdlib"
+version = "2.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3"
+
+[[package]]
+name = "alloc-stdlib"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece"
+dependencies = [
+ "alloc-no-stdlib",
+]
+
+[[package]]
+name = "android_system_properties"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "anyhow"
+version = "1.0.68"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2cb2f989d18dd141ab8ae82f64d1a8cdd37e0840f73a406896cf5e99502fab61"
+
+[[package]]
+name = "arrayvec"
+version = "0.7.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6"
+
+[[package]]
+name = "async-compression"
+version = "0.3.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "942c7cd7ae39e91bde4820d74132e9862e62c2f386c3aa90ccf55949f5bad63a"
+dependencies = [
+ "brotli",
+ "futures-core",
+ "memchr",
+ "pin-project-lite",
+ "tokio",
+ "xz2",
+ "zstd",
+ "zstd-safe",
+]
+
+[[package]]
+name = "async-stream"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dad5c83079eae9969be7fadefe640a1c566901f05ff91ab221de4b6f68d9507e"
+dependencies = [
+ "async-stream-impl",
+ "futures-core",
+]
+
+[[package]]
+name = "async-stream-impl"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "10f203db73a71dfa2fb6dd22763990fa26f3d2625a6da2da900d23b87d26be27"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "async-trait"
+version = "0.1.60"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "677d1d8ab452a3936018a687b20e6f7cf5363d713b732b8884001317b0e48aa3"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "atoi"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d7c57d12312ff59c811c0643f4d80830505833c9ffaebd193d819392b265be8e"
+dependencies = [
+ "num-traits",
+]
+
+[[package]]
+name = "attic"
+version = "0.1.0"
+dependencies = [
+ "base64 0.20.0",
+ "bindgen",
+ "cxx",
+ "cxx-build",
+ "digest",
+ "displaydoc",
+ "ed25519-compact",
+ "futures",
+ "hex",
+ "lazy_static",
+ "log",
+ "nix-base32",
+ "pkg-config",
+ "regex",
+ "serde",
+ "serde_json",
+ "serde_yaml",
+ "sha2",
+ "tempfile",
+ "tokio",
+ "tokio-test",
+ "wildmatch",
+ "xdg",
+]
+
+[[package]]
+name = "attic-client"
+version = "0.1.0"
+dependencies = [
+ "anyhow",
+ "attic",
+ "bytes",
+ "clap 4.0.32",
+ "clap_complete",
+ "const_format",
+ "dialoguer",
+ "displaydoc",
+ "enum-as-inner",
+ "futures",
+ "humantime",
+ "indicatif",
+ "lazy_static",
+ "regex",
+ "reqwest",
+ "serde",
+ "serde_json",
+ "tokio",
+ "toml",
+ "tracing",
+ "tracing-subscriber",
+ "xdg",
+]
+
+[[package]]
+name = "attic-server"
+version = "0.1.0"
+dependencies = [
+ "anyhow",
+ "async-compression",
+ "async-trait",
+ "attic",
+ "aws-sdk-s3",
+ "axum",
+ "axum-macros",
+ "base64 0.20.0",
+ "bytes",
+ "chrono",
+ "clap 4.0.32",
+ "console-subscriber",
+ "derivative",
+ "digest",
+ "displaydoc",
+ "enum-as-inner",
+ "futures",
+ "hex",
+ "humantime",
+ "humantime-serde",
+ "itoa",
+ "jsonwebtoken",
+ "lazy_static",
+ "maybe-owned",
+ "rand",
+ "regex",
+ "ryu",
+ "sea-orm",
+ "sea-orm-migration",
+ "serde",
+ "serde_json",
+ "serde_with",
+ "sha2",
+ "tokio",
+ "tokio-util",
+ "toml",
+ "tower-http",
+ "tracing",
+ "tracing-subscriber",
+ "uuid",
+ "xdg",
+]
+
+[[package]]
+name = "atty"
+version = "0.2.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
+dependencies = [
+ "hermit-abi 0.1.19",
+ "libc",
+ "winapi",
+]
+
+[[package]]
+name = "autocfg"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
+
+[[package]]
+name = "aws-endpoint"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "253d7cd480bfa59a5323390e9e91885a8f06a275e0517d81eeb1070b6aa7d271"
+dependencies = [
+ "aws-smithy-http",
+ "aws-smithy-types",
+ "aws-types",
+ "http",
+ "regex",
+ "tracing",
+]
+
+[[package]]
+name = "aws-http"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4cd1b83859383e46ea8fda633378f9f3f02e6e3a446fd89f0240b5c3662716c9"
+dependencies = [
+ "aws-smithy-http",
+ "aws-smithy-types",
+ "aws-types",
+ "bytes",
+ "http",
+ "http-body",
+ "lazy_static",
+ "percent-encoding",
+ "pin-project-lite",
+ "tracing",
+]
+
+[[package]]
+name = "aws-sdk-s3"
+version = "0.22.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c4d240ff751efc65099d18f6b0fb80360b31a298cec7b392c511692bec4a6e21"
+dependencies = [
+ "aws-endpoint",
+ "aws-http",
+ "aws-sig-auth",
+ "aws-sigv4",
+ "aws-smithy-async",
+ "aws-smithy-checksums",
+ "aws-smithy-client",
+ "aws-smithy-eventstream",
+ "aws-smithy-http",
+ "aws-smithy-http-tower",
+ "aws-smithy-types",
+ "aws-smithy-xml",
+ "aws-types",
+ "bytes",
+ "bytes-utils",
+ "fastrand",
+ "http",
+ "http-body",
+ "tokio-stream",
+ "tower",
+ "tracing",
+]
+
+[[package]]
+name = "aws-sig-auth"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6126c4ff918e35fb9ae1bf2de71157fad36f0cc6a2b1d0f7197ee711713700fc"
+dependencies = [
+ "aws-sigv4",
+ "aws-smithy-eventstream",
+ "aws-smithy-http",
+ "aws-types",
+ "http",
+ "tracing",
+]
+
+[[package]]
+name = "aws-sigv4"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "84c7f88d7395f5411c6eef5889b6cd577ce6b677af461356cbfc20176c26c160"
+dependencies = [
+ "aws-smithy-eventstream",
+ "aws-smithy-http",
+ "bytes",
+ "form_urlencoded",
+ "hex",
+ "hmac",
+ "http",
+ "once_cell",
+ "percent-encoding",
+ "regex",
+ "sha2",
+ "time 0.3.17",
+ "tracing",
+]
+
+[[package]]
+name = "aws-smithy-async"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3e6a895d68852dd1564328e63ef1583e5eb307dd2a5ebf35d862a5c402957d5e"
+dependencies = [
+ "futures-util",
+ "pin-project-lite",
+ "tokio",
+ "tokio-stream",
+]
+
+[[package]]
+name = "aws-smithy-checksums"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72b847d960abc993319d77b52e82971e2bbdce94f6192df42142e14ed5c9c917"
+dependencies = [
+ "aws-smithy-http",
+ "aws-smithy-types",
+ "bytes",
+ "crc32c",
+ "crc32fast",
+ "hex",
+ "http",
+ "http-body",
+ "md-5",
+ "pin-project-lite",
+ "sha1",
+ "sha2",
+ "tracing",
+]
+
+[[package]]
+name = "aws-smithy-client"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f505bf793eb3e6d7c166ef1275c27b4b2cd5361173fe950ac8e2cfc08c29a7ef"
+dependencies = [
+ "aws-smithy-async",
+ "aws-smithy-http",
+ "aws-smithy-http-tower",
+ "aws-smithy-types",
+ "bytes",
+ "fastrand",
+ "http",
+ "http-body",
+ "hyper",
+ "hyper-rustls",
+ "lazy_static",
+ "pin-project-lite",
+ "tokio",
+ "tower",
+ "tracing",
+]
+
+[[package]]
+name = "aws-smithy-eventstream"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d751c99da757aecc1408ab6b2d65e9493220a5e7a68bcafa4f07b6fd1bc473f1"
+dependencies = [
+ "aws-smithy-types",
+ "bytes",
+ "crc32fast",
+]
+
+[[package]]
+name = "aws-smithy-http"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "37e4b4304b7ea4af1af3e08535100eb7b6459d5a6264b92078bf85176d04ab85"
+dependencies = [
+ "aws-smithy-eventstream",
+ "aws-smithy-types",
+ "bytes",
+ "bytes-utils",
+ "futures-core",
+ "http",
+ "http-body",
+ "hyper",
+ "once_cell",
+ "percent-encoding",
+ "pin-project-lite",
+ "pin-utils",
+ "tokio",
+ "tokio-util",
+ "tracing",
+]
+
+[[package]]
+name = "aws-smithy-http-tower"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e86072ecc4dc4faf3e2071144285cfd539263fe7102b701d54fb991eafb04af8"
+dependencies = [
+ "aws-smithy-http",
+ "aws-smithy-types",
+ "bytes",
+ "http",
+ "http-body",
+ "pin-project-lite",
+ "tower",
+ "tracing",
+]
+
+[[package]]
+name = "aws-smithy-types"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "987b1e37febb9bd409ca0846e82d35299e572ad8279bc404778caeb5fc05ad56"
+dependencies = [
+ "base64-simd",
+ "itoa",
+ "num-integer",
+ "ryu",
+ "time 0.3.17",
+]
+
+[[package]]
+name = "aws-smithy-xml"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "37ce3791e14eec75ffac851a5a559f1ce6b31843297f42cc8bfba82714a6a5d8"
+dependencies = [
+ "xmlparser",
+]
+
+[[package]]
+name = "aws-types"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6c05adca3e2bcf686dd2c47836f216ab52ed7845c177d180c84b08522c1166a3"
+dependencies = [
+ "aws-smithy-async",
+ "aws-smithy-client",
+ "aws-smithy-http",
+ "aws-smithy-types",
+ "http",
+ "rustc_version",
+ "tracing",
+ "zeroize",
+]
+
+[[package]]
+name = "axum"
+version = "0.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "08b108ad2665fa3f6e6a517c3d80ec3e77d224c47d605167aefaa5d7ef97fa48"
+dependencies = [
+ "async-trait",
+ "axum-core",
+ "bitflags",
+ "bytes",
+ "futures-util",
+ "http",
+ "http-body",
+ "hyper",
+ "itoa",
+ "matchit",
+ "memchr",
+ "mime",
+ "percent-encoding",
+ "pin-project-lite",
+ "rustversion",
+ "serde",
+ "serde_json",
+ "serde_path_to_error",
+ "serde_urlencoded",
+ "sync_wrapper",
+ "tokio",
+ "tower",
+ "tower-http",
+ "tower-layer",
+ "tower-service",
+]
+
+[[package]]
+name = "axum-core"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "79b8558f5a0581152dc94dcd289132a1d377494bdeafcd41869b3258e3e2ad92"
+dependencies = [
+ "async-trait",
+ "bytes",
+ "futures-util",
+ "http",
+ "http-body",
+ "mime",
+ "rustversion",
+ "tower-layer",
+ "tower-service",
+]
+
+[[package]]
+name = "axum-macros"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e4df0fc33ada14a338b799002f7e8657711422b25d4e16afb032708d6b185621"
+dependencies = [
+ "heck 0.4.0",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "bae"
+version = "0.1.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "33b8de67cc41132507eeece2584804efcb15f85ba516e34c944b7667f480397a"
+dependencies = [
+ "heck 0.3.3",
+ "proc-macro-error",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "base64"
+version = "0.13.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8"
+
+[[package]]
+name = "base64"
+version = "0.20.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0ea22880d78093b0cbe17c89f64a7d457941e65759157ec6cb31a31d652b05e5"
+
+[[package]]
+name = "base64-simd"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "781dd20c3aff0bd194fe7d2a977dd92f21c173891f3a03b677359e5fa457e5d5"
+dependencies = [
+ "simd-abstraction",
+]
+
+[[package]]
+name = "bindgen"
+version = "0.63.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "36d860121800b2a9a94f9b5604b332d5cffb234ce17609ea479d723dbc9d3885"
+dependencies = [
+ "bitflags",
+ "cexpr",
+ "clang-sys",
+ "lazy_static",
+ "lazycell",
+ "log",
+ "peeking_take_while",
+ "proc-macro2",
+ "quote",
+ "regex",
+ "rustc-hash",
+ "shlex",
+ "syn",
+ "which",
+]
+
+[[package]]
+name = "bitflags"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
+
+[[package]]
+name = "block-buffer"
+version = "0.10.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e"
+dependencies = [
+ "generic-array",
+]
+
+[[package]]
+name = "borsh"
+version = "0.9.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "15bf3650200d8bffa99015595e10f1fbd17de07abbc25bb067da79e769939bfa"
+dependencies = [
+ "borsh-derive",
+ "hashbrown 0.11.2",
+]
+
+[[package]]
+name = "borsh-derive"
+version = "0.9.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6441c552f230375d18e3cc377677914d2ca2b0d36e52129fe15450a2dce46775"
+dependencies = [
+ "borsh-derive-internal",
+ "borsh-schema-derive-internal",
+ "proc-macro-crate",
+ "proc-macro2",
+ "syn",
+]
+
+[[package]]
+name = "borsh-derive-internal"
+version = "0.9.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5449c28a7b352f2d1e592a8a28bf139bc71afb0764a14f3c02500935d8c44065"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "borsh-schema-derive-internal"
+version = "0.9.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cdbd5696d8bfa21d53d9fe39a714a18538bad11492a42d066dbbc395fb1951c0"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "brotli"
+version = "3.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a1a0b1dbcc8ae29329621f8d4f0d835787c1c38bb1401979b49d13b0b305ff68"
+dependencies = [
+ "alloc-no-stdlib",
+ "alloc-stdlib",
+ "brotli-decompressor",
+]
+
+[[package]]
+name = "brotli-decompressor"
+version = "2.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "59ad2d4653bf5ca36ae797b1f4bb4dbddb60ce49ca4aed8a2ce4829f60425b80"
+dependencies = [
+ "alloc-no-stdlib",
+ "alloc-stdlib",
+]
+
+[[package]]
+name = "bumpalo"
+version = "3.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba"
+
+[[package]]
+name = "bytecheck"
+version = "0.6.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d11cac2c12b5adc6570dad2ee1b87eff4955dac476fe12d81e5fdd352e52406f"
+dependencies = [
+ "bytecheck_derive",
+ "ptr_meta",
+]
+
+[[package]]
+name = "bytecheck_derive"
+version = "0.6.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "13e576ebe98e605500b3c8041bb888e966653577172df6dd97398714eb30b9bf"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "byteorder"
+version = "1.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
+
+[[package]]
+name = "bytes"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dfb24e866b15a1af2a1b663f10c6b6b8f397a84aadb828f12e5b289ec23a3a3c"
+
+[[package]]
+name = "bytes-utils"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e47d3a8076e283f3acd27400535992edb3ba4b5bb72f8891ad8fbe7932a7d4b9"
+dependencies = [
+ "bytes",
+ "either",
+]
+
+[[package]]
+name = "cc"
+version = "1.0.78"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a20104e2335ce8a659d6dd92a51a767a0c062599c73b343fd152cb401e828c3d"
+dependencies = [
+ "jobserver",
+]
+
+[[package]]
+name = "cexpr"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766"
+dependencies = [
+ "nom",
+]
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "chrono"
+version = "0.4.23"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f"
+dependencies = [
+ "iana-time-zone",
+ "js-sys",
+ "num-integer",
+ "num-traits",
+ "serde",
+ "time 0.1.45",
+ "wasm-bindgen",
+ "winapi",
+]
+
+[[package]]
+name = "clang-sys"
+version = "1.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fa2e27ae6ab525c3d369ded447057bca5438d86dc3a68f6faafb8269ba82ebf3"
+dependencies = [
+ "glob",
+ "libc",
+ "libloading",
+]
+
+[[package]]
+name = "clap"
+version = "3.2.23"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5"
+dependencies = [
+ "atty",
+ "bitflags",
+ "clap_derive 3.2.18",
+ "clap_lex 0.2.4",
+ "indexmap",
+ "once_cell",
+ "strsim",
+ "termcolor",
+ "textwrap",
+]
+
+[[package]]
+name = "clap"
+version = "4.0.32"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a7db700bc935f9e43e88d00b0850dae18a63773cfbec6d8e070fccf7fef89a39"
+dependencies = [
+ "bitflags",
+ "clap_derive 4.0.21",
+ "clap_lex 0.3.0",
+ "is-terminal",
+ "once_cell",
+ "strsim",
+ "termcolor",
+]
+
+[[package]]
+name = "clap_complete"
+version = "4.0.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "10861370d2ba66b0f5989f83ebf35db6421713fd92351790e7fdd6c36774c56b"
+dependencies = [
+ "clap 4.0.32",
+]
+
+[[package]]
+name = "clap_derive"
+version = "3.2.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ea0c8bce528c4be4da13ea6fead8965e95b6073585a2f05204bd8f4119f82a65"
+dependencies = [
+ "heck 0.4.0",
+ "proc-macro-error",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "clap_derive"
+version = "4.0.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0177313f9f02afc995627906bbd8967e2be069f5261954222dac78290c2b9014"
+dependencies = [
+ "heck 0.4.0",
+ "proc-macro-error",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "clap_lex"
+version = "0.2.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5"
+dependencies = [
+ "os_str_bytes",
+]
+
+[[package]]
+name = "clap_lex"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0d4198f73e42b4936b35b5bb248d81d2b595ecb170da0bac7655c54eedfa8da8"
+dependencies = [
+ "os_str_bytes",
+]
+
+[[package]]
+name = "codespan-reporting"
+version = "0.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e"
+dependencies = [
+ "termcolor",
+ "unicode-width",
+]
+
+[[package]]
+name = "console"
+version = "0.15.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5556015fe3aad8b968e5d4124980fbe2f6aaee7aeec6b749de1faaa2ca5d0a4c"
+dependencies = [
+ "encode_unicode",
+ "lazy_static",
+ "libc",
+ "unicode-width",
+ "windows-sys 0.42.0",
+]
+
+[[package]]
+name = "console-api"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e57ff02e8ad8e06ab9731d5dc72dc23bef9200778eae1a89d555d8c42e5d4a86"
+dependencies = [
+ "prost",
+ "prost-types",
+ "tonic",
+ "tracing-core",
+]
+
+[[package]]
+name = "console-subscriber"
+version = "0.1.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "22a3a81dfaf6b66bce5d159eddae701e3a002f194d378cbf7be5f053c281d9be"
+dependencies = [
+ "console-api",
+ "crossbeam-channel",
+ "crossbeam-utils",
+ "futures",
+ "hdrhistogram",
+ "humantime",
+ "prost-types",
+ "serde",
+ "serde_json",
+ "thread_local",
+ "tokio",
+ "tokio-stream",
+ "tonic",
+ "tracing",
+ "tracing-core",
+ "tracing-subscriber",
+]
+
+[[package]]
+name = "const_format"
+version = "0.2.30"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7309d9b4d3d2c0641e018d449232f2e28f1b22933c137f157d3dbc14228b8c0e"
+dependencies = [
+ "const_format_proc_macros",
+]
+
+[[package]]
+name = "const_format_proc_macros"
+version = "0.2.29"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d897f47bf7270cf70d370f8f98c1abb6d2d4cf60a6845d30e05bfb90c6568650"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-xid",
+]
+
+[[package]]
+name = "core-foundation"
+version = "0.9.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146"
+dependencies = [
+ "core-foundation-sys",
+ "libc",
+]
+
+[[package]]
+name = "core-foundation-sys"
+version = "0.8.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc"
+
+[[package]]
+name = "cpufeatures"
+version = "0.2.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "crc"
+version = "3.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "53757d12b596c16c78b83458d732a5d1a17ab3f53f2f7412f6fb57cc8a140ab3"
+dependencies = [
+ "crc-catalog",
+]
+
+[[package]]
+name = "crc-catalog"
+version = "2.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2d0165d2900ae6778e36e80bbc4da3b5eefccee9ba939761f9c2882a5d9af3ff"
+
+[[package]]
+name = "crc32c"
+version = "0.6.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3dfea2db42e9927a3845fb268a10a72faed6d416065f77873f05e411457c363e"
+dependencies = [
+ "rustc_version",
+]
+
+[[package]]
+name = "crc32fast"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "crossbeam-channel"
+version = "0.5.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521"
+dependencies = [
+ "cfg-if",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-queue"
+version = "0.3.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add"
+dependencies = [
+ "cfg-if",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-utils"
+version = "0.8.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "crypto-common"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
+dependencies = [
+ "generic-array",
+ "typenum",
+]
+
+[[package]]
+name = "ct-codecs"
+version = "1.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f3b7eb4404b8195a9abb6356f4ac07d8ba267045c8d6d220ac4dc992e6cc75df"
+
+[[package]]
+name = "cxx"
+version = "1.0.85"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5add3fc1717409d029b20c5b6903fc0c0b02fa6741d820054f4a2efa5e5816fd"
+dependencies = [
+ "cc",
+ "cxxbridge-flags",
+ "cxxbridge-macro",
+ "link-cplusplus",
+]
+
+[[package]]
+name = "cxx-build"
+version = "1.0.85"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b4c87959ba14bc6fbc61df77c3fcfe180fc32b93538c4f1031dd802ccb5f2ff0"
+dependencies = [
+ "cc",
+ "codespan-reporting",
+ "once_cell",
+ "proc-macro2",
+ "quote",
+ "scratch",
+ "syn",
+]
+
+[[package]]
+name = "cxxbridge-flags"
+version = "1.0.85"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "69a3e162fde4e594ed2b07d0f83c6c67b745e7f28ce58c6df5e6b6bef99dfb59"
+
+[[package]]
+name = "cxxbridge-macro"
+version = "1.0.85"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3e7e2adeb6a0d4a282e581096b06e1791532b7d576dcde5ccd9382acf55db8e6"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "darling"
+version = "0.14.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b0dd3cd20dc6b5a876612a6e5accfe7f3dd883db6d07acfbf14c128f61550dfa"
+dependencies = [
+ "darling_core",
+ "darling_macro",
+]
+
+[[package]]
+name = "darling_core"
+version = "0.14.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a784d2ccaf7c98501746bf0be29b2022ba41fd62a2e622af997a03e9f972859f"
+dependencies = [
+ "fnv",
+ "ident_case",
+ "proc-macro2",
+ "quote",
+ "strsim",
+ "syn",
+]
+
+[[package]]
+name = "darling_macro"
+version = "0.14.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7618812407e9402654622dd402b0a89dff9ba93badd6540781526117b92aab7e"
+dependencies = [
+ "darling_core",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "derivative"
+version = "2.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "dialoguer"
+version = "0.10.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a92e7e37ecef6857fdc0c0c5d42fd5b0938e46590c2183cc92dd310a6d078eb1"
+dependencies = [
+ "console",
+ "tempfile",
+ "zeroize",
+]
+
+[[package]]
+name = "digest"
+version = "0.10.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f"
+dependencies = [
+ "block-buffer",
+ "crypto-common",
+ "subtle",
+]
+
+[[package]]
+name = "dirs"
+version = "4.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ca3aa72a6f96ea37bbc5aa912f6788242832f75369bdfdadcb0e38423f100059"
+dependencies = [
+ "dirs-sys",
+]
+
+[[package]]
+name = "dirs-sys"
+version = "0.3.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6"
+dependencies = [
+ "libc",
+ "redox_users",
+ "winapi",
+]
+
+[[package]]
+name = "displaydoc"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3bf95dc3f046b9da4f2d51833c0d3547d8564ef6910f5c1ed130306a75b92886"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "dotenvy"
+version = "0.15.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "03d8c417d7a8cb362e0c37e5d815f5eb7c37f79ff93707329d5a194e42e54ca0"
+
+[[package]]
+name = "ed25519-compact"
+version = "2.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6a3d382e8464107391c8706b4c14b087808ecb909f6c15c34114bc42e53a9e4c"
+dependencies = [
+ "ct-codecs",
+ "getrandom",
+]
+
+[[package]]
+name = "either"
+version = "1.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797"
+
+[[package]]
+name = "encode_unicode"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f"
+
+[[package]]
+name = "encoding_rs"
+version = "0.8.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9852635589dc9f9ea1b6fe9f05b50ef208c85c834a562f0c6abb1c475736ec2b"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "enum-as-inner"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116"
+dependencies = [
+ "heck 0.4.0",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "errno"
+version = "0.2.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1"
+dependencies = [
+ "errno-dragonfly",
+ "libc",
+ "winapi",
+]
+
+[[package]]
+name = "errno-dragonfly"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf"
+dependencies = [
+ "cc",
+ "libc",
+]
+
+[[package]]
+name = "event-listener"
+version = "2.5.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0"
+
+[[package]]
+name = "fastrand"
+version = "1.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499"
+dependencies = [
+ "instant",
+]
+
+[[package]]
+name = "flate2"
+version = "1.0.25"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841"
+dependencies = [
+ "crc32fast",
+ "miniz_oxide",
+]
+
+[[package]]
+name = "flume"
+version = "0.10.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1657b4441c3403d9f7b3409e47575237dac27b1b5726df654a6ecbf92f0f7577"
+dependencies = [
+ "futures-core",
+ "futures-sink",
+ "pin-project",
+ "spin 0.9.4",
+]
+
+[[package]]
+name = "fnv"
+version = "1.0.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
+
+[[package]]
+name = "form_urlencoded"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8"
+dependencies = [
+ "percent-encoding",
+]
+
+[[package]]
+name = "futures"
+version = "0.3.25"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "38390104763dc37a5145a53c29c63c1290b5d316d6086ec32c293f6736051bb0"
+dependencies = [
+ "futures-channel",
+ "futures-core",
+ "futures-executor",
+ "futures-io",
+ "futures-sink",
+ "futures-task",
+ "futures-util",
+]
+
+[[package]]
+name = "futures-channel"
+version = "0.3.25"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "52ba265a92256105f45b719605a571ffe2d1f0fea3807304b522c1d778f79eed"
+dependencies = [
+ "futures-core",
+ "futures-sink",
+]
+
+[[package]]
+name = "futures-core"
+version = "0.3.25"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "04909a7a7e4633ae6c4a9ab280aeb86da1236243a77b694a49eacd659a4bd3ac"
+
+[[package]]
+name = "futures-executor"
+version = "0.3.25"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7acc85df6714c176ab5edf386123fafe217be88c0840ec11f199441134a074e2"
+dependencies = [
+ "futures-core",
+ "futures-task",
+ "futures-util",
+]
+
+[[package]]
+name = "futures-intrusive"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a604f7a68fbf8103337523b1fadc8ade7361ee3f112f7c680ad179651616aed5"
+dependencies = [
+ "futures-core",
+ "lock_api",
+ "parking_lot 0.11.2",
+]
+
+[[package]]
+name = "futures-io"
+version = "0.3.25"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "00f5fb52a06bdcadeb54e8d3671f8888a39697dcb0b81b23b55174030427f4eb"
+
+[[package]]
+name = "futures-macro"
+version = "0.3.25"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bdfb8ce053d86b91919aad980c220b1fb8401a9394410e1c289ed7e66b61835d"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "futures-sink"
+version = "0.3.25"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "39c15cf1a4aa79df40f1bb462fb39676d0ad9e366c2a33b590d7c66f4f81fcf9"
+
+[[package]]
+name = "futures-task"
+version = "0.3.25"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2ffb393ac5d9a6eaa9d3fdf37ae2776656b706e200c8e16b1bdb227f5198e6ea"
+
+[[package]]
+name = "futures-util"
+version = "0.3.25"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "197676987abd2f9cadff84926f410af1c183608d36641465df73ae8211dc65d6"
+dependencies = [
+ "futures-channel",
+ "futures-core",
+ "futures-io",
+ "futures-macro",
+ "futures-sink",
+ "futures-task",
+ "memchr",
+ "pin-project-lite",
+ "pin-utils",
+ "slab",
+]
+
+[[package]]
+name = "generic-array"
+version = "0.14.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9"
+dependencies = [
+ "typenum",
+ "version_check",
+]
+
+[[package]]
+name = "getrandom"
+version = "0.2.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "wasi 0.11.0+wasi-snapshot-preview1",
+]
+
+[[package]]
+name = "glob"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574"
+
+[[package]]
+name = "h2"
+version = "0.3.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4"
+dependencies = [
+ "bytes",
+ "fnv",
+ "futures-core",
+ "futures-sink",
+ "futures-util",
+ "http",
+ "indexmap",
+ "slab",
+ "tokio",
+ "tokio-util",
+ "tracing",
+]
+
+[[package]]
+name = "hashbrown"
+version = "0.11.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e"
+dependencies = [
+ "ahash",
+]
+
+[[package]]
+name = "hashbrown"
+version = "0.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
+dependencies = [
+ "ahash",
+]
+
+[[package]]
+name = "hashlink"
+version = "0.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "69fe1fcf8b4278d860ad0548329f892a3631fb63f82574df68275f34cdbe0ffa"
+dependencies = [
+ "hashbrown 0.12.3",
+]
+
+[[package]]
+name = "hdrhistogram"
+version = "7.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7f19b9f54f7c7f55e31401bb647626ce0cf0f67b0004982ce815b3ee72a02aa8"
+dependencies = [
+ "base64 0.13.1",
+ "byteorder",
+ "flate2",
+ "nom",
+ "num-traits",
+]
+
+[[package]]
+name = "heck"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c"
+dependencies = [
+ "unicode-segmentation",
+]
+
+[[package]]
+name = "heck"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9"
+dependencies = [
+ "unicode-segmentation",
+]
+
+[[package]]
+name = "hermit-abi"
+version = "0.1.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "hermit-abi"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "hex"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
+
+[[package]]
+name = "hkdf"
+version = "0.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437"
+dependencies = [
+ "hmac",
+]
+
+[[package]]
+name = "hmac"
+version = "0.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e"
+dependencies = [
+ "digest",
+]
+
+[[package]]
+name = "http"
+version = "0.2.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399"
+dependencies = [
+ "bytes",
+ "fnv",
+ "itoa",
+]
+
+[[package]]
+name = "http-body"
+version = "0.4.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1"
+dependencies = [
+ "bytes",
+ "http",
+ "pin-project-lite",
+]
+
+[[package]]
+name = "http-range-header"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29"
+
+[[package]]
+name = "httparse"
+version = "1.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904"
+
+[[package]]
+name = "httpdate"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421"
+
+[[package]]
+name = "humantime"
+version = "2.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
+
+[[package]]
+name = "humantime-serde"
+version = "1.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "57a3db5ea5923d99402c94e9feb261dc5ee9b4efa158b0315f788cf549cc200c"
+dependencies = [
+ "humantime",
+ "serde",
+]
+
+[[package]]
+name = "hyper"
+version = "0.14.23"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "034711faac9d2166cb1baf1a2fb0b60b1f277f8492fd72176c17f3515e1abd3c"
+dependencies = [
+ "bytes",
+ "futures-channel",
+ "futures-core",
+ "futures-util",
+ "h2",
+ "http",
+ "http-body",
+ "httparse",
+ "httpdate",
+ "itoa",
+ "pin-project-lite",
+ "socket2",
+ "tokio",
+ "tower-service",
+ "tracing",
+ "want",
+]
+
+[[package]]
+name = "hyper-rustls"
+version = "0.23.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c"
+dependencies = [
+ "http",
+ "hyper",
+ "log",
+ "rustls",
+ "rustls-native-certs",
+ "tokio",
+ "tokio-rustls",
+ "webpki-roots",
+]
+
+[[package]]
+name = "hyper-timeout"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1"
+dependencies = [
+ "hyper",
+ "pin-project-lite",
+ "tokio",
+ "tokio-io-timeout",
+]
+
+[[package]]
+name = "iana-time-zone"
+version = "0.1.53"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765"
+dependencies = [
+ "android_system_properties",
+ "core-foundation-sys",
+ "iana-time-zone-haiku",
+ "js-sys",
+ "wasm-bindgen",
+ "winapi",
+]
+
+[[package]]
+name = "iana-time-zone-haiku"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca"
+dependencies = [
+ "cxx",
+ "cxx-build",
+]
+
+[[package]]
+name = "ident_case"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
+
+[[package]]
+name = "idna"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6"
+dependencies = [
+ "unicode-bidi",
+ "unicode-normalization",
+]
+
+[[package]]
+name = "indexmap"
+version = "1.9.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399"
+dependencies = [
+ "autocfg",
+ "hashbrown 0.12.3",
+ "serde",
+]
+
+[[package]]
+name = "indicatif"
+version = "0.17.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4295cbb7573c16d310e99e713cf9e75101eb190ab31fccd35f2d2691b4352b19"
+dependencies = [
+ "console",
+ "number_prefix",
+ "portable-atomic",
+ "unicode-width",
+]
+
+[[package]]
+name = "instant"
+version = "0.1.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "io-lifetimes"
+version = "1.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "46112a93252b123d31a119a8d1a1ac19deac4fac6e0e8b0df58f0d4e5870e63c"
+dependencies = [
+ "libc",
+ "windows-sys 0.42.0",
+]
+
+[[package]]
+name = "ipnet"
+version = "2.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11b0d96e660696543b251e58030cf9787df56da39dab19ad60eae7353040917e" + +[[package]] +name = "is-terminal" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28dfb6c8100ccc63462345b67d1bbc3679177c75ee4bf59bf29c8b1d110b8189" +dependencies = [ + "hermit-abi 0.2.6", + "io-lifetimes", + "rustix", + "windows-sys 0.42.0", +] + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440" + +[[package]] +name = "jobserver" +version = "0.1.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "068b1ee6743e4d11fb9c6a1e6064b3693a1b600e7f5f5988047d98b3dc9fb90b" +dependencies = [ + "libc", +] + +[[package]] +name = "js-sys" +version = "0.3.60" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "jsonwebtoken" +version = "8.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09f4f04699947111ec1733e71778d763555737579e44b85844cae8e1940a1828" +dependencies = [ + "base64 0.13.1", + "pem", + "ring", + "serde", + "serde_json", + "simple_asn1", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "lazycell" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" + +[[package]] +name = "libc" +version = "0.2.139" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79" + +[[package]] +name = "libloading" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" +dependencies = [ + "cfg-if", + "winapi", +] + +[[package]] +name = "libsqlite3-sys" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "898745e570c7d0453cc1fbc4a701eb6c662ed54e8fec8b7d14be137ebeeb9d14" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "link-cplusplus" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecd207c9c713c34f95a097a5b029ac2ce6010530c7b49d7fea24d977dede04f5" +dependencies = [ + "cc", +] + +[[package]] +name = "linux-raw-sys" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" + +[[package]] +name = "lock_api" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" +dependencies = [ + "autocfg", + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "lzma-sys" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fda04ab3764e6cde78b9974eec4f779acaba7c4e84b36eca3cf77c581b85d27" +dependencies = [ + "cc", + "libc", + "pkg-config", +] + +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata", +] + +[[package]] +name = "matchit" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b87248edafb776e59e6ee64a79086f65890d3510f2c656c000bf2a7e8a0aea40" + +[[package]] +name = "maybe-owned" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4facc753ae494aeb6e3c22f839b158aebd4f9270f55cd3c79906c45476c47ab4" + +[[package]] +name = "md-5" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6365506850d44bff6e2fbcb5176cf63650e48bd45ef2fe2665ae1570e0f4b9ca" +dependencies = [ + "digest", +] + +[[package]] +name = "memchr" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" + +[[package]] +name = "mime" +version = "0.3.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" + +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + +[[package]] +name = "miniz_oxide" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" +dependencies = [ + "adler", +] + +[[package]] +name = "mio" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" +dependencies = [ + "libc", + "log", + "wasi 0.11.0+wasi-snapshot-preview1", + "windows-sys 0.42.0", +] + +[[package]] +name = "nix-base32" +version = "0.1.2-alpha.0" +source = "git+https://github.com/zhaofengli/nix-base32.git?rev=b850c6e9273d1c39bd93abb704a53345f5be92eb#b850c6e9273d1c39bd93abb704a53345f5be92eb" + +[[package]] +name = "nom" +version = "7.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8903e5a29a317527874d0402f867152a3d21c908bb0b933e416c65e301d4c36" +dependencies = [ + "memchr", + "minimal-lexical", +] + +[[package]] +name = "nu-ansi-term" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] + +[[package]] +name = "num-bigint" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-integer" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +dependencies = [ + "autocfg", + 
"num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num_cpus" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" +dependencies = [ + "hermit-abi 0.2.6", + "libc", +] + +[[package]] +name = "number_prefix" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" + +[[package]] +name = "once_cell" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860" + +[[package]] +name = "openssl-probe" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" + +[[package]] +name = "os_str_bytes" +version = "6.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee" + +[[package]] +name = "ouroboros" +version = "0.15.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfbb50b356159620db6ac971c6d5c9ab788c9cc38a6f49619fca2a27acb062ca" +dependencies = [ + "aliasable", + "ouroboros_macro", +] + +[[package]] +name = "ouroboros_macro" +version = "0.15.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a0d9d1a6191c4f391f87219d1ea42b23f09ee84d64763cd05ee6ea88d9f384d" +dependencies = [ + "Inflector", + "proc-macro-error", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "outref" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f222829ae9293e33a9f5e9f440c6760a3d450a64affe1846486b140db81c1f4" + +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + +[[package]] +name = "parking_lot" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" +dependencies = [ + "instant", + "lock_api", + "parking_lot_core 0.8.6", +] + +[[package]] +name = "parking_lot" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +dependencies = [ + "lock_api", + "parking_lot_core 0.9.5", +] + +[[package]] +name = "parking_lot_core" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" +dependencies = [ + "cfg-if", + "instant", + "libc", + "redox_syscall", + "smallvec", + "winapi", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ff9f3fef3968a3ec5945535ed654cb38ff72d7495a25619e2247fb15a2ed9ba" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-sys 0.42.0", +] + +[[package]] +name = "paste" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d01a5bd0424d00070b0098dd17ebca6f961a959dead1dbcbbbc1d1cd8d3deeba" + +[[package]] +name = "peeking_take_while" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" + +[[package]] +name = "pem" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03c64931a1a212348ec4f3b4362585eca7159d0d09cbdf4a7f74f02173596fd4" +dependencies = [ + "base64 0.13.1", +] + +[[package]] +name = "percent-encoding" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" + +[[package]] +name = "pin-project" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "pkg-config" +version = "0.3.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" + +[[package]] +name = "portable-atomic" +version = "0.3.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26f6a7b87c2e435a3241addceeeff740ff8b7e76b74c13bf9acb17fa454ea00b" + +[[package]] +name = "ppv-lite86" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" + +[[package]] +name = "proc-macro-crate" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" +dependencies = [ + "toml", +] + +[[package]] +name = "proc-macro-error" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check", +] + +[[package]] +name = "proc-macro2" +version = "1.0.49" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57a8eca9f9c4ffde41714334dee777596264c7825420f521abc92b5b5deb63a5" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "prost" +version = "0.11.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c01db6702aa05baa3f57dec92b8eeeeb4cb19e894e73996b32a4093289e54592" +dependencies = [ + "bytes", + 
"prost-derive", +] + +[[package]] +name = "prost-derive" +version = "0.11.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8842bad1a5419bca14eac663ba798f6bc19c413c2fdceb5f3ba3b0932d96720" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "prost-types" +version = "0.11.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "017f79637768cde62820bc2d4fe0e45daaa027755c323ad077767c6c5f173091" +dependencies = [ + "bytes", + "prost", +] + +[[package]] +name = "ptr_meta" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0738ccf7ea06b608c10564b31debd4f5bc5e197fc8bfe088f68ae5ce81e7a4f1" +dependencies = [ + "ptr_meta_derive", +] + +[[package]] +name = "ptr_meta_derive" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "quote" +version = "1.0.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + +[[package]] +name = "redox_syscall" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" +dependencies = [ + "bitflags", +] + +[[package]] +name = "redox_users" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" +dependencies = [ + "getrandom", + "redox_syscall", + "thiserror", +] + +[[package]] +name = "regex" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.6.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" + +[[package]] +name = "remove_dir_all" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +dependencies = [ + "winapi", +] + +[[package]] +name = "rend" +version = "0.3.6" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "79af64b4b6362ffba04eef3a4e10829718a4896dac19daa741851c86781edf95" +dependencies = [ + "bytecheck", +] + +[[package]] +name = "reqwest" +version = "0.11.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68cc60575865c7831548863cc02356512e3f1dc2f3f82cb837d7fc4cc8f3c97c" +dependencies = [ + "base64 0.13.1", + "bytes", + "encoding_rs", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "hyper", + "hyper-rustls", + "ipnet", + "js-sys", + "log", + "mime", + "once_cell", + "percent-encoding", + "pin-project-lite", + "rustls", + "rustls-pemfile", + "serde", + "serde_json", + "serde_urlencoded", + "tokio", + "tokio-rustls", + "tokio-util", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "webpki-roots", + "winreg", +] + +[[package]] +name = "ring" +version = "0.16.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" +dependencies = [ + "cc", + "libc", + "once_cell", + "spin 0.5.2", + "untrusted", + "web-sys", + "winapi", +] + +[[package]] +name = "rkyv" +version = "0.7.39" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cec2b3485b07d96ddfd3134767b8a447b45ea4eb91448d0a35180ec0ffd5ed15" +dependencies = [ + "bytecheck", + "hashbrown 0.12.3", + "ptr_meta", + "rend", + "rkyv_derive", + "seahash", +] + +[[package]] +name = "rkyv_derive" +version = "0.7.39" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6eaedadc88b53e36dd32d940ed21ae4d850d5916f2581526921f553a72ac34c4" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "rust_decimal" +version = "1.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33c321ee4e17d2b7abe12b5d20c1231db708dd36185c8a21e9de5fed6da4dbe9" +dependencies = [ + "arrayvec", + "borsh", + "bytecheck", + "byteorder", + "bytes", + "num-traits", + "rand", + "rkyv", + "serde", + "serde_json", +] + +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + +[[package]] +name = "rustc_version" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver", +] + +[[package]] +name = "rustix" +version = "0.36.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4feacf7db682c6c329c4ede12649cd36ecab0f3be5b7d74e6a20304725db4549" +dependencies = [ + "bitflags", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys", + "windows-sys 0.42.0", +] + +[[package]] +name = "rustls" +version = "0.20.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "539a2bfe908f471bfa933876bd1eb6a19cf2176d375f82ef7f99530a40e48c2c" +dependencies = [ + "log", + "ring", + "sct", + "webpki", +] + +[[package]] +name = "rustls-native-certs" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0167bac7a9f490495f3c33013e7722b53cb087ecbe082fb0c6387c96f634ea50" +dependencies = [ + "openssl-probe", + "rustls-pemfile", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-pemfile" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "0864aeff53f8c05aa08d86e5ef839d3dfcf07aeba2db32f12db0ef716e87bd55" +dependencies = [ + "base64 0.13.1", +] + +[[package]] +name = "rustversion" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5583e89e108996506031660fe09baa5011b9dd0341b89029313006d1fb508d70" + +[[package]] +name = "ryu" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b4b9743ed687d4b4bcedf9ff5eaa7398495ae14e61cba0a295704edbc7decde" + +[[package]] +name = "schannel" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88d6731146462ea25d9244b2ed5fd1d716d25c52e4d54aa4fb0f3c4e9854dbe2" +dependencies = [ + "lazy_static", + "windows-sys 0.36.1", +] + +[[package]] +name = "scopeguard" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" + +[[package]] +name = "scratch" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddccb15bcce173023b3fedd9436f882a0739b8dfb45e4f6b6002bee5929f61b2" + +[[package]] +name = "sct" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "sea-orm" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc2db217f2061ab2bbb1bd22323a533ace0617f97690919f3ed3894e1b3ba170" +dependencies = [ + "async-stream", + "async-trait", + "chrono", + "futures", + "futures-util", + "log", + "ouroboros", + "rust_decimal", + "sea-orm-macros", + "sea-query", + "sea-query-binder", + "sea-strum", + "serde", + "serde_json", + "sqlx", + "thiserror", + "time 0.3.17", + "tracing", + "url", + "uuid", +] + +[[package]] +name = "sea-orm-cli" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebcce92f0f804acd10b4378a3c8b0e5fb28f3a9ae9337006bd651baa3a95632c" +dependencies = [ + "chrono", + "clap 3.2.23", + "dotenvy", + "regex", + "sea-schema", + "tracing", + "tracing-subscriber", + "url", +] + +[[package]] +name = "sea-orm-macros" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38066057ef1fa17ddc6ce1458cf269862b8f1df919497d110ea127b549a90fbd" +dependencies = [ + "bae", + "heck 0.3.3", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sea-orm-migration" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ada716f9825e4190a0a8ebaecbf7171ce0ed6f218ea2e70086bdc72ccfc1d03c" +dependencies = [ + "async-trait", + "clap 3.2.23", + "dotenvy", + "sea-orm", + "sea-orm-cli", + "sea-schema", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "sea-query" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4f0fc4d8e44e1d51c739a68d336252a18bc59553778075d5e32649be6ec92ed" +dependencies = [ + "chrono", + "rust_decimal", + "sea-query-derive", + "serde_json", + "time 0.3.17", + "uuid", +] + +[[package]] +name = "sea-query-binder" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c2585b89c985cfacfe0ec9fc9e7bb055b776c1a2581c4e3c6185af2b8bf8865" +dependencies = [ + "chrono", + "rust_decimal", + "sea-query", + "serde_json", + "sqlx", + "time 0.3.17", + "uuid", +] + +[[package]] 
+name = "sea-query-derive" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34cdc022b4f606353fe5dc85b09713a04e433323b70163e81513b141c6ae6eb5" +dependencies = [ + "heck 0.3.3", + "proc-macro2", + "quote", + "syn", + "thiserror", +] + +[[package]] +name = "sea-schema" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38d5fda574d980e9352b6c7abd6fc75697436fe0078cac2b548559b52643ad3b" +dependencies = [ + "futures", + "sea-query", + "sea-schema-derive", +] + +[[package]] +name = "sea-schema-derive" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56821b7076f5096b8f726e2791ad255a99c82498e08ec477a65a96c461ff1927" +dependencies = [ + "heck 0.3.3", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sea-strum" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "391d06a6007842cfe79ac6f7f53911b76dfd69fc9a6769f1cf6569d12ce20e1b" +dependencies = [ + "sea-strum_macros", +] + +[[package]] +name = "sea-strum_macros" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69b4397b825df6ccf1e98bcdabef3bbcfc47ff5853983467850eeab878384f21" +dependencies = [ + "heck 0.3.3", + "proc-macro2", + "quote", + "rustversion", + "syn", +] + +[[package]] +name = "seahash" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" + +[[package]] +name = "security-framework" +version = "2.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bc1bb97804af6631813c55739f771071e0f2ed33ee20b68c86ec505d906356c" +dependencies = [ + "bitflags", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0160a13a177a45bfb43ce71c01580998474f556ad854dcbca936dd2841a5c556" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "semver" +version = "1.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58bc9567378fc7690d6b2addae4e60ac2eeea07becb2c64b9f218b53865cba2a" + +[[package]] +name = "serde" +version = "1.0.152" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.152" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.91" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877c235533714907a8c2464236f5c4b2a17262ef1bd71f38f35ea592c8da6883" +dependencies = [ + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serde_path_to_error" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26b04f22b563c91331a10074bda3dd5492e3cc39d56bd557e91c0af42b6c7341" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serde_with" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25bf4a5a814902cd1014dbccfa4d4560fb8432c779471e96e035602519f82eef" +dependencies = [ + "base64 0.13.1", + "chrono", + "hex", + "indexmap", + "serde", + "serde_json", + "serde_with_macros", + "time 0.3.17", +] + +[[package]] +name = "serde_with_macros" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3452b4c0f6c1e357f73fdb87cd1efabaa12acf328c7a528e252893baeb3f4aa" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_yaml" +version = "0.9.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92b5b431e8907b50339b51223b97d102db8d987ced36f6e4d03621db9316c834" +dependencies = [ + "indexmap", + "itoa", + "ryu", + "serde", + "unsafe-libyaml", +] + +[[package]] +name = "sha1" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "sha2" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", + "sha2-asm", +] + +[[package]] +name = "sha2-asm" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf27176fb5d15398e3a479c652c20459d9dac830dedd1fa55b42a77dbcdbfcea" +dependencies = [ + "cc", +] + +[[package]] +name = "sharded-slab" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "shlex" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" + +[[package]] +name = "signal-hook-registry" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +dependencies = [ + "libc", +] + +[[package]] +name = "simd-abstraction" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cadb29c57caadc51ff8346233b5cec1d240b68ce55cf1afc764818791876987" +dependencies = [ + "outref", +] + +[[package]] +name = "simple_asn1" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" +dependencies = [ + "num-bigint", + "num-traits", + "thiserror", + "time 0.3.17", +] + +[[package]] +name = "slab" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" +dependencies = [ + "autocfg", +] + +[[package]] +name = "smallvec" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" + +[[package]] +name = "socket2" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "spin" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" + +[[package]] +name = "spin" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f6002a767bff9e83f8eeecf883ecb8011875a21ae8da43bffb817a57e78cc09" +dependencies = [ + "lock_api", +] + +[[package]] +name = "sqlformat" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f87e292b4291f154971a43c3774364e2cbcaec599d3f5bf6fa9d122885dbc38a" +dependencies = [ + "itertools", + "nom", + "unicode_categories", +] + +[[package]] +name = "sqlx" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9249290c05928352f71c077cc44a464d880c63f26f7534728cca008e135c0428" +dependencies = [ + "sqlx-core", + "sqlx-macros", +] + +[[package]] +name = "sqlx-core" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcbc16ddba161afc99e14d1713a453747a2b07fc097d2009f4c300ec99286105" +dependencies = [ + "ahash", + "atoi", + "base64 0.13.1", + "bitflags", + "byteorder", + "bytes", + "chrono", + "crc", + "crossbeam-queue", + "dirs", + "dotenvy", + "either", + "event-listener", + "flume", + "futures-channel", + "futures-core", + "futures-executor", + "futures-intrusive", + "futures-util", + "hashlink", + "hex", + "hkdf", + "hmac", + "indexmap", + "itoa", + "libc", + "libsqlite3-sys", + "log", + "md-5", + "memchr", + "num-bigint", + "once_cell", + "paste", + "percent-encoding", + "rand", + "rust_decimal", + "rustls", + "rustls-pemfile", + "serde", + "serde_json", + "sha1", + "sha2", + "smallvec", + "sqlformat", + "sqlx-rt", + "stringprep", + "thiserror", + "time 0.3.17", + "tokio-stream", + "url", + "uuid", + "webpki-roots", + "whoami", +] + +[[package]] +name = "sqlx-macros" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b850fa514dc11f2ee85be9d055c512aa866746adfacd1cb42d867d68e6a5b0d9" +dependencies = [ + "dotenvy", + "either", + "heck 0.4.0", + "once_cell", + "proc-macro2", + "quote", + "serde_json", + "sha2", + "sqlx-core", + "sqlx-rt", + "syn", + "url", +] + +[[package]] +name = "sqlx-rt" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24c5b2d25fa654cc5f841750b8e1cdedbe21189bf9a9382ee90bfa9dd3562396" +dependencies = [ + "once_cell", + "tokio", + "tokio-rustls", +] + +[[package]] +name = "stringprep" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ee348cb74b87454fff4b551cbf727025810a004f88aeacae7f85b87f4e9a1c1" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "strsim" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" + +[[package]] +name = "subtle" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" + +[[package]] +name = "syn" +version = "1.0.107" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5" +dependencies = [ + "proc-macro2", 
+ "quote", + "unicode-ident", +] + +[[package]] +name = "sync_wrapper" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20518fe4a4c9acf048008599e464deb21beeae3d3578418951a189c235a7a9a8" + +[[package]] +name = "tempfile" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" +dependencies = [ + "cfg-if", + "fastrand", + "libc", + "redox_syscall", + "remove_dir_all", + "winapi", +] + +[[package]] +name = "termcolor" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "textwrap" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" + +[[package]] +name = "thiserror" +version = "1.0.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a9cd18aa97d5c45c6603caea1da6628790b37f7a34b6ca89522331c5180fed0" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "thread_local" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" +dependencies = [ + "once_cell", +] + +[[package]] +name = "time" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a" +dependencies = [ + "libc", + "wasi 0.10.0+wasi-snapshot-preview1", + "winapi", +] + +[[package]] +name = "time" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376" +dependencies = [ + "itoa", + "serde", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" + +[[package]] +name = "time-macros" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d967f99f534ca7e495c575c62638eebc2898a8c84c119b89e250477bc4ba16b2" +dependencies = [ + "time-core", +] + +[[package]] +name = "tinyvec" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" + +[[package]] +name = "tokio" +version = "1.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eab6d665857cc6ca78d6e80303a02cea7a7851e85dfbd77cbdc09bd129f1ef46" +dependencies = [ + "autocfg", + "bytes", + "libc", + "memchr", + "mio", + "num_cpus", + "parking_lot 0.12.1", + "pin-project-lite", + "signal-hook-registry", + "socket2", + "tokio-macros", + "tracing", + 
"windows-sys 0.42.0", +] + +[[package]] +name = "tokio-io-timeout" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" +dependencies = [ + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-macros" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tokio-rustls" +version = "0.23.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" +dependencies = [ + "rustls", + "tokio", + "webpki", +] + +[[package]] +name = "tokio-stream" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d660770404473ccd7bc9f8b28494a811bc18542b915c0855c51e8f419d5223ce" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-test" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53474327ae5e166530d17f2d956afcb4f8a004de581b3cae10f12006bc8163e3" +dependencies = [ + "async-stream", + "bytes", + "futures-core", + "tokio", + "tokio-stream", +] + +[[package]] +name = "tokio-util" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", + "tracing", +] + +[[package]] +name = "toml" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1333c76748e868a4d9d1017b5ab53171dfd095f70c712fdb4653a406547f598f" +dependencies = [ + "serde", +] + +[[package]] +name = "tonic" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f219fad3b929bef19b1f86fbc0358d35daed8f2cac972037ac0dc10bbb8d5fb" +dependencies = [ + "async-stream", + "async-trait", + "axum", + "base64 0.13.1", + "bytes", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "hyper", + "hyper-timeout", + "percent-encoding", + "pin-project", + "prost", + "prost-derive", + "tokio", + "tokio-stream", + "tokio-util", + "tower", + "tower-layer", + "tower-service", + "tracing", + "tracing-futures", +] + +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "indexmap", + "pin-project", + "pin-project-lite", + "rand", + "slab", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-http" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f873044bf02dd1e8239e9c1293ea39dad76dc594ec16185d0a1bf31d8dc8d858" +dependencies = [ + "bitflags", + "bytes", + "futures-core", + "futures-util", + "http", + "http-body", + "http-range-header", + "pin-project-lite", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-layer" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" + +[[package]] +name = "tower-service" 
+version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" + +[[package]] +name = "tracing" +version = "0.1.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" +dependencies = [ + "cfg-if", + "log", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tracing-core" +version = "0.1.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-futures" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" +dependencies = [ + "pin-project", + "tracing", +] + +[[package]] +name = "tracing-log" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" +dependencies = [ + "lazy_static", + "log", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6176eae26dd70d0c919749377897b54a9276bd7061339665dd68777926b5a70" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", +] + +[[package]] +name = "try-lock" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" + +[[package]] +name = "typenum" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" + +[[package]] +name = "unicode-bidi" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" + +[[package]] +name = "unicode-ident" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc" + +[[package]] +name = "unicode-normalization" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "unicode-segmentation" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fdbf052a0783de01e944a6ce7a8cb939e295b1e7be835a1112c3b9a7f047a5a" + +[[package]] +name = "unicode-width" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" + +[[package]] +name = "unicode-xid" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" + +[[package]] +name = "unicode_categories" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e" + +[[package]] +name = "unsafe-libyaml" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc7ed8ba44ca06be78ea1ad2c3682a43349126c8818054231ee6f4748012aed2" + +[[package]] +name = "untrusted" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" + +[[package]] +name = "url" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", +] + +[[package]] +name = "uuid" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "422ee0de9031b5b948b97a8fc04e3aa35230001a722ddd27943e0be31564ce4c" +dependencies = [ + "getrandom", + "serde", +] + +[[package]] +name = "valuable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "version_check" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" + +[[package]] +name = "want" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +dependencies = [ + "log", + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.10.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wasm-bindgen" +version = "0.2.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268" +dependencies = [ + "cfg-if", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142" +dependencies = [ + "bumpalo", + "log", + "once_cell", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23639446165ca5a5de86ae1d8896b737ae80319560fbaa4c2887b7da6e7ebd7d" +dependencies = [ + "cfg-if", + "js-sys", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810" 
+dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f" + +[[package]] +name = "web-sys" +version = "0.3.60" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bcda906d8be16e728fd5adc5b729afad4e444e106ab28cd1c7256e54fa61510f" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "webpki-roots" +version = "0.22.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87" +dependencies = [ + "webpki", +] + +[[package]] +name = "which" +version = "4.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c831fbbee9e129a8cf93e7747a82da9d95ba8e16621cae60ec2cdc849bacb7b" +dependencies = [ + "either", + "libc", + "once_cell", +] + +[[package]] +name = "whoami" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6631b6a2fd59b1841b622e8f1a7ad241ef0a46f2d580464ce8140ac94cbd571" +dependencies = [ + "bumpalo", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wildmatch" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee583bdc5ff1cf9db20e9db5bb3ff4c3089a8f6b8b31aff265c9aba85812db86" + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +dependencies = [ + "winapi", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-sys" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" +dependencies = [ + "windows_aarch64_msvc 0.36.1", + "windows_i686_gnu 0.36.1", + "windows_i686_msvc 0.36.1", + "windows_x86_64_gnu 0.36.1", + "windows_x86_64_msvc 0.36.1", +] + +[[package]] +name = "windows-sys" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc 0.42.0", + "windows_i686_gnu 0.42.0", + "windows_i686_msvc 0.42.0", + "windows_x86_64_gnu 0.42.0", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc 0.42.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4" + +[[package]] +name = "windows_i686_gnu" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" + +[[package]] +name = "windows_i686_gnu" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7" + +[[package]] +name = "windows_i686_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" + +[[package]] +name = "windows_i686_msvc" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5" + +[[package]] +name = "winreg" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d" +dependencies = [ + "winapi", +] + +[[package]] +name = "xdg" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c4583db5cbd4c4c0303df2d15af80f0539db703fa1c68802d4cbbd2dd0f88f6" +dependencies = [ + "dirs", +] + +[[package]] +name = "xmlparser" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d25c75bf9ea12c4040a97f829154768bbbce366287e2dc044af160cd79a13fd" + +[[package]] +name = "xz2" +version = "0.1.7" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "388c44dc09d76f1536602ead6d325eb532f5c122f17782bd57fb47baeeb767e2" +dependencies = [ + "lzma-sys", +] + +[[package]] +name = "zeroize" +version = "1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" + +[[package]] +name = "zstd" +version = "0.11.2+zstd.1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "5.0.2+zstd.1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d2a5585e04f9eea4b2a3d1eca508c4dee9592a89ef6f450c11719da0726f4db" +dependencies = [ + "libc", + "zstd-sys", +] + +[[package]] +name = "zstd-sys" +version = "2.0.4+zstd.1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fa202f2ef00074143e219d15b62ffc317d17cc33909feac471c044087cad7b0" +dependencies = [ + "cc", + "libc", +] diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 0000000..5cd6c2f --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,11 @@ +[workspace] + +members = [ + "attic", + "client", + "server", +] + +[profile.dev] +opt-level = 2 +incremental = true diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..8b84cb2 --- /dev/null +++ b/LICENSE @@ -0,0 +1,13 @@ +Copyright 2022 Zhaofeng Li and the Attic contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/README.md b/README.md new file mode 100644 index 0000000..e056966 --- /dev/null +++ b/README.md @@ -0,0 +1,35 @@ +# Attic + +**Attic** is a self-hostable Nix Binary Cache server backed by an S3-compatible storage provider. +It has support for global deduplication and garbage collection. + +Attic is an early prototype. + +``` +⚙️ Pushing 5 paths to "demo" on "local" (566 already cached, 2001 in upstream)... +✅ gnvi1x7r8kl3clzx0d266wi82fgyzidv-steam-run-fhs (29.69 MiB/s) +✅ rw7bx7ak2p02ljm3z4hhpkjlr8rzg6xz-steam-fhs (30.56 MiB/s) +✅ y92f9y7qhkpcvrqhzvf6k40j6iaxddq8-0p36ammvgyr55q9w75845kw4fw1c65ln-source (19.96 MiB/s) +🕒 vscode-1.74.2 ███████████████████████████████████████ 345.66 MiB (41.32 MiB/s) +🕓 zoom-5.12.9.367 ███████████████████████████ 329.36 MiB (39.47 MiB/s) +``` + +## Try it out (15 minutes) + +Let's [spin up Attic](https://docs.attic.rs/tutorial.html) in just 15 minutes. +And yes, it works on macOS too! + +## Goals + +- **Multi-Tenancy**: Create a private cache for yourself, and one for friends and co-workers. Tenants are mutually untrusting and cannot pollute the views of other caches. +- **Global Deduplication**: Individual caches (tenants) are simply restricted views of the content-addressed global cache. When paths are uploaded, a mapping is created to grant the local cache access to the global NAR. +- **Managed Signing**: Signing is done on-the-fly by the server when store paths are fetched. 
The user pushing store paths does not have access to the signing key.
+- **Scalability**: Attic can be easily replicated. It's designed to be deployed to serverless platforms like fly.io but also works nicely in a single-machine setup.
+- **Garbage Collection**: Unused store paths can be garbage-collected in an LRU manner.
+
+## Licensing
+
+Attic is available under the **Apache License, Version 2.0**.
+See `LICENSE` for details.
+
+By contributing to the project, you agree to license your work under the aforementioned license.
diff --git a/attic/Cargo.toml b/attic/Cargo.toml
new file mode 100644
index 0000000..5f4f888
--- /dev/null
+++ b/attic/Cargo.toml
@@ -0,0 +1,49 @@
+[package]
+name = "attic"
+version = "0.1.0"
+edition = "2021"
+publish = false
+
+[dependencies]
+base64 = "0.20.0"
+displaydoc = "0.2.3"
+digest = "0.10.6"
+ed25519-compact = "2.0.4"
+futures = "0.3.25"
+hex = "0.4.3"
+lazy_static = "1.4.0"
+log = "0.4.17"
+nix-base32 = { git = "https://github.com/zhaofengli/nix-base32.git", rev = "b850c6e9273d1c39bd93abb704a53345f5be92eb" }
+regex = "1.7.0"
+serde = { version = "1.0.151", features = ["derive"] }
+serde_yaml = "0.9.16"
+sha2 = "0.10.6"
+tempfile = "3"
+wildmatch = "2.1.1"
+xdg = "2.4.1"
+
+# Native libnixstore bindings.
+cxx = { version = "1.0", optional = true }
+
+[dependencies.tokio]
+version = "1.23.0"
+features = [
+    "full",
+]
+
+[dev-dependencies]
+serde_json = "1.0.91"
+tokio-test = "0.4.2"
+
+[build-dependencies]
+bindgen = { version = "0.63.0", optional = true }
+cxx-build = { version = "1.0", optional = true }
+pkg-config = "0.3.26"
+
+[features]
+default = [ "nix_store" ]
+
+# Native libnixstore bindings.
+#
+# When disabled, the native Rust portions of nix_store can still be used.
+nix_store = [ "cxx", "bindgen", "cxx-build" ]
diff --git a/attic/build.rs b/attic/build.rs
new file mode 100644
index 0000000..571a831
--- /dev/null
+++ b/attic/build.rs
@@ -0,0 +1,98 @@
+//! Build script.
+//!
+//! We link against libnixstore to perform actions on the Nix Store.
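+//!
+//! With the `nix_store` feature enabled, this script does two things:
+//! `build_bridge()` compiles the cxx.rs bridge in `src/nix_store/bindings`
+//! together with the C++ glue in `nix.cpp`, and `run_bindgen()` generates
+//! plain Rust declarations for a few simple Nix types (such as `nix::Hash`)
+//! into `$OUT_DIR/bindgen.rs`.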
+
+#[cfg(feature = "nix_store")]
+use bindgen::callbacks::{EnumVariantValue, ParseCallbacks};
+
+fn main() {
+    #[cfg(feature = "nix_store")]
+    build_bridge();
+
+    #[cfg(feature = "nix_store")]
+    run_bindgen();
+}
+
+#[cfg(feature = "nix_store")]
+#[derive(Debug)]
+struct TransformNix;
+
+#[cfg(feature = "nix_store")]
+impl ParseCallbacks for TransformNix {
+    fn enum_variant_name(
+        &self,
+        enum_name: Option<&str>,
+        original_variant_name: &str,
+        _variant_value: EnumVariantValue,
+    ) -> Option<String> {
+        match enum_name {
+            Some("HashType") => {
+                let t = match original_variant_name {
+                    "htUnknown" => "Unknown",
+                    "htMD5" => "Md5",
+                    "htSHA1" => "Sha1",
+                    "htSHA256" => "Sha256",
+                    "htSHA512" => "Sha512",
+                    x => panic!("Unknown hash type {} - Add it in build.rs", x),
+                };
+                Some(t.to_owned())
+            }
+            _ => None,
+        }
+    }
+
+    fn include_file(&self, filename: &str) {
+        println!("cargo:rerun-if-changed={}", filename);
+    }
+}
+
+#[cfg(feature = "nix_store")]
+fn build_bridge() {
+    cxx_build::bridge("src/nix_store/bindings/mod.rs")
+        .file("src/nix_store/bindings/nix.cpp")
+        .flag("-std=c++17")
+        .flag("-O2")
+        .flag("-include")
+        .flag("nix/config.h")
+        .compile("nixbinding");
+
+    println!("cargo:rerun-if-changed=src/nix_store/bindings");
+}
+
+#[cfg(feature = "nix_store")]
+fn run_bindgen() {
+    use std::env;
+    use std::path::PathBuf;
+
+    let out_path = PathBuf::from(env::var("OUT_DIR").unwrap());
+
+    let headers = vec!["src/nix_store/bindings/bindgen.hpp"];
+
+    let mut builder = bindgen::Builder::default()
+        .clang_arg("-std=c++17")
+        .clang_arg("-include")
+        .clang_arg("nix/config.h")
+        .opaque_type("std::.*")
+        .allowlist_type("nix::Hash")
+        .rustified_enum("nix::HashType")
+        .disable_name_namespacing()
+        .layout_tests(false)
+        .parse_callbacks(Box::new(TransformNix));
+
+    for header in headers {
+        builder = builder.header(header);
+        println!("cargo:rerun-if-changed={}", header);
+    }
+
+    let bindings = builder.generate().expect("Failed to generate Nix bindings");
+
+    bindings
+        .write_to_file(out_path.join("bindgen.rs"))
+        .expect("Failed to write bindings");
+
+    // the -l flags must be after -lnixbinding
+    pkg_config::Config::new()
+        .atleast_version("2.4")
+        .probe("nix-store")
+        .unwrap();
+}
diff --git a/attic/src/api/mod.rs b/attic/src/api/mod.rs
new file mode 100644
index 0000000..a3a6d96
--- /dev/null
+++ b/attic/src/api/mod.rs
@@ -0,0 +1 @@
+pub mod v1;
diff --git a/attic/src/api/v1/cache_config.rs b/attic/src/api/v1/cache_config.rs
new file mode 100644
index 0000000..df14df3
--- /dev/null
+++ b/attic/src/api/v1/cache_config.rs
@@ -0,0 +1,136 @@
+//! Cache configuration endpoint.
+
+use serde::{Deserialize, Serialize};
+
+use crate::signing::NixKeypair;
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct CreateCacheRequest {
+    /// The keypair of the cache.
+    pub keypair: KeypairConfig,
+
+    /// Whether the cache is public or not.
+    ///
+    /// Anonymous clients are implicitly granted the "pull"
+    /// permission to public caches.
+    pub is_public: bool,
+
+    /// The Nix store path this binary cache uses.
+    ///
+    /// This is usually `/nix/store`.
+    pub store_dir: String,
+
+    /// The priority of the binary cache.
+    ///
+    /// A lower number denotes a higher priority.
+    /// `cache.nixos.org` has a priority of 40.
+    pub priority: i32,
+
+    /// A list of signing key names of upstream caches.
+    ///
+    /// The list serves as a hint to clients to avoid uploading
+    /// store paths signed with such keys.
+    pub upstream_cache_key_names: Vec<String>,
+}
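+
+// For illustration, a request for a public cache with a server-generated
+// keypair could serialize to JSON like this (a sketch; the field values are
+// examples only):
+//
+//   {
+//     "keypair": "Generate",
+//     "is_public": true,
+//     "store_dir": "/nix/store",
+//     "priority": 41,
+//     "upstream_cache_key_names": ["cache.nixos.org-1"]
+//   }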
+
+/// Configuration of a cache.
+///
+/// Specifying `None` means using the default value or
+/// keeping the current value.
+#[derive(Debug, Serialize, Deserialize)]
+pub struct CacheConfig {
+    /// The keypair of the cache.
+    ///
+    /// The keypair is never returned by the server, but can
+    /// be configured by the client.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub keypair: Option<KeypairConfig>,
+
+    /// The Nix binary cache endpoint of the cache.
+    ///
+    /// This is the endpoint that should be added to `nix.conf`.
+    /// This is read-only and may not be available.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub substituter_endpoint: Option<String>,
+
+    /// The Attic API endpoint.
+    ///
+    /// This is read-only and may not be available.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub api_endpoint: Option<String>,
+
+    /// The public key of the cache, in the canonical format used by Nix.
+    ///
+    /// This is read-only and may not be available.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub public_key: Option<String>,
+
+    /// Whether the cache is public or not.
+    ///
+    /// Anonymous clients are implicitly granted the "pull"
+    /// permission to public caches.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub is_public: Option<bool>,
+
+    /// The Nix store path this binary cache uses.
+    ///
+    /// This is usually `/nix/store`.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub store_dir: Option<String>,
+
+    /// The priority of the binary cache.
+    ///
+    /// A lower number denotes a higher priority.
+    /// `cache.nixos.org` has a priority of 40.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub priority: Option<i32>,
+
+    /// A list of signing key names of upstream caches.
+    ///
+    /// The list serves as a hint to clients to avoid uploading
+    /// store paths signed with such keys.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub upstream_cache_key_names: Option<Vec<String>>,
+
+    /// The retention period of the cache.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub retention_period: Option<RetentionPeriodConfig>,
+}
+
+/// Configuration of a keypair.
+#[derive(Debug, Serialize, Deserialize)]
+pub enum KeypairConfig {
+    /// Use a randomly-generated keypair.
+    Generate,
+
+    /// Use a client-specified keypair.
+    Keypair(NixKeypair),
+}
+
+/// Configuration of retention period.
+#[derive(Debug, Serialize, Deserialize)]
+pub enum RetentionPeriodConfig {
+    /// Use the global default.
+    Global,
+
+    /// Specify a retention period in seconds.
+    ///
+    /// If 0, then time-based garbage collection is disabled.
+    Period(u32),
+}
+
+impl CacheConfig {
+    pub fn blank() -> Self {
+        Self {
+            keypair: None,
+            substituter_endpoint: None,
+            api_endpoint: None,
+            public_key: None,
+            is_public: None,
+            store_dir: None,
+            priority: None,
+            upstream_cache_key_names: None,
+            retention_period: None,
+        }
+    }
+}
diff --git a/attic/src/api/v1/get_missing_paths.rs b/attic/src/api/v1/get_missing_paths.rs
new file mode 100644
index 0000000..e6cc466
--- /dev/null
+++ b/attic/src/api/v1/get_missing_paths.rs
@@ -0,0 +1,25 @@
+//! get-missing-paths v1
+//!
+//! `POST /_api/v1/get-missing-paths`
+//!
+//! Requires "push" permission.
+
+use serde::{Deserialize, Serialize};
+
+use crate::cache::CacheName;
+use crate::nix_store::StorePathHash;
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct GetMissingPathsRequest {
+    /// The name of the cache.
+    pub cache: CacheName,
+
+    /// The list of store paths.
+    pub store_path_hashes: Vec<StorePathHash>,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct GetMissingPathsResponse {
+    /// A list of paths that are not in the cache.
+    pub missing_paths: Vec<StorePathHash>,
+}
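+
+// For illustration, one round trip could look like this (the hash is an
+// example value only):
+//
+//   -> {"cache": "demo", "store_path_hashes": ["ia70ss13m22znbl8khrf2hq72qmh5drr"]}
+//   <- {"missing_paths": ["ia70ss13m22znbl8khrf2hq72qmh5drr"]}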
diff --git a/attic/src/api/v1/mod.rs b/attic/src/api/v1/mod.rs
new file mode 100644
index 0000000..6c0cc4e
--- /dev/null
+++ b/attic/src/api/v1/mod.rs
@@ -0,0 +1,3 @@
+pub mod cache_config;
+pub mod get_missing_paths;
+pub mod upload_path;
diff --git a/attic/src/api/v1/upload_path.rs b/attic/src/api/v1/upload_path.rs
new file mode 100644
index 0000000..03c2667
--- /dev/null
+++ b/attic/src/api/v1/upload_path.rs
@@ -0,0 +1,52 @@
+use serde::{Deserialize, Serialize};
+
+use crate::cache::CacheName;
+use crate::hash::Hash;
+use crate::nix_store::StorePathHash;
+
+/// NAR information associated with an upload.
+///
+/// This is JSON-serialized as the value of the `X-Attic-Nar-Info` header.
+/// The (client-compressed) NAR is the PUT body.
+///
+/// Regardless of client compression, the server will always decompress
+/// the NAR to validate the NAR hash before applying the server-configured
+/// compression again.
+#[derive(Debug, Serialize, Deserialize)]
+pub struct UploadPathNarInfo {
+    /// The name of the binary cache to upload to.
+    pub cache: CacheName,
+
+    /// The hash portion of the store path.
+    pub store_path_hash: StorePathHash,
+
+    /// The full store path being cached, including the store directory.
+    pub store_path: String,
+
+    /// Other store paths this object directly references.
+    pub references: Vec<String>,
+
+    /// The system this derivation is built for.
+    pub system: Option<String>,
+
+    /// The derivation that produced this object.
+    pub deriver: Option<String>,
+
+    /// The signatures of this object.
+    pub sigs: Vec<String>,
+
+    /// The CA field of this object.
+    pub ca: Option<String>,
+
+    /// The hash of the NAR.
+    ///
+    /// It must begin with `sha256:` with the SHA-256 hash in the
+    /// hexadecimal format (64 hex characters).
+    ///
+    /// This is informational and the server always validates the supplied
+    /// hash.
+    pub nar_hash: Hash,
+
+    /// The size of the NAR.
+    pub nar_size: usize,
+}
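+
+// For illustration, a sketch of the client side of this protocol. The HTTP
+// client and the exact endpoint path are assumptions here, not defined by
+// this file:
+//
+//   let response = http_client
+//       .put(format!("{}/_api/v1/upload-path", server_endpoint))
+//       .header("X-Attic-Nar-Info", serde_json::to_string(&nar_info)?)
+//       .body(compressed_nar)
+//       .send()
+//       .await?;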
diff --git a/attic/src/cache.rs b/attic/src/cache.rs
new file mode 100644
index 0000000..a946d22
--- /dev/null
+++ b/attic/src/cache.rs
@@ -0,0 +1,266 @@
+//! Binary caches.
+//!
+//! ## Cache Naming
+//!
+//! Cache names can be up to 50 characters long and can only consist of
+//! ASCII alphanumeric characters (A-Za-z0-9), dashes ('-'), underscores
+//! ('_'), and plus signs ('+'). They must also start with an alphanumeric
+//! character (e.g., "_cache" is _not_ a valid cache name).
+//!
+//! The plus sign is intended to be used as the delimiter between a
+//! namespace and a user-given name (e.g., `zhaofengli+shared`).
+
+use std::hash::{Hash, Hasher};
+use std::str::FromStr;
+
+use lazy_static::lazy_static;
+use regex::Regex;
+use serde::{de, Deserialize, Serialize};
+use wildmatch::WildMatch;
+
+use crate::error::{AtticError, AtticResult};
+
+/// The maximum allowable length of a cache name.
+pub const MAX_NAME_LENGTH: usize = 50;
+
+lazy_static! {
+    static ref CACHE_NAME_REGEX: Regex = Regex::new(r"^[A-Za-z0-9][A-Za-z0-9-_+]{0,49}$").unwrap();
+    static ref CACHE_NAME_PATTERN_REGEX: Regex =
+        Regex::new(r"^[A-Za-z0-9*][A-Za-z0-9-_+*]{0,49}$").unwrap();
+}
+
+/// The name of a binary cache.
+///
+/// Names can only consist of ASCII alphanumeric characters (A-Za-z0-9),
+/// dashes ('-'), underscores ('_'), and plus signs ('+').
+#[derive(Serialize, Deserialize, Clone, Debug, Hash, Eq, PartialEq)]
+#[serde(transparent)]
+pub struct CacheName(#[serde(deserialize_with = "CacheName::deserialize")] String);
+
+/// A pattern of cache names.
+///
+/// The keys in the custom JWT claim are patterns that can
+/// be matched against cache names. Thus patterns can only be created
+/// by trusted entities.
+///
+/// In addition to what's allowed in cache names, patterns can include
+/// wildcards ('*') to enable a limited form of namespace-based access
+/// control.
+///
+/// This is particularly useful in conjunction with the `cache_create`
+/// permission which allows the user to autonomously create caches under
+/// their own namespace (e.g., `zhaofengli+*`).
+#[derive(Serialize, Clone, Debug)]
+#[serde(transparent)]
+pub struct CacheNamePattern {
+    pattern: String,
+
+    /// The pattern matcher.
+    ///
+    /// If None, then `pattern` itself will be used to match exactly.
+    /// This is a special case for converting a CacheName to a
+    /// CacheNamePattern.
+    ///
+    /// It's possible to combine the two structs into one, but the goal
+    /// is to have strong delineation between them enforced by the type
+    /// system (you can't attempt to call `matches` at all on a regular
+    /// CacheName).
+    #[serde(skip)]
+    matcher: Option<WildMatch>,
+}
+
+impl CacheName {
+    /// Creates a cache name from a String.
+    pub fn new(name: String) -> AtticResult<Self> {
+        validate_cache_name(&name, false)?;
+        Ok(Self(name))
+    }
+
+    /// Returns the string.
+    pub fn as_str(&self) -> &str {
+        &self.0
+    }
+
+    pub fn to_string(&self) -> String {
+        self.0.clone()
+    }
+
+    /// Returns the corresponding pattern that only matches this cache.
+    pub fn to_pattern(&self) -> CacheNamePattern {
+        CacheNamePattern {
+            pattern: self.0.clone(),
+            matcher: None,
+        }
+    }
+
+    /// Deserializes a potentially-invalid cache name.
+    fn deserialize<'de, D>(deserializer: D) -> Result<String, D::Error>
+    where
+        D: de::Deserializer<'de>,
+    {
+        use de::Error;
+        String::deserialize(deserializer).and_then(|s| {
+            validate_cache_name(&s, false).map_err(|e| Error::custom(e.to_string()))?;
+            Ok(s)
+        })
+    }
+}
+
+impl FromStr for CacheName {
+    type Err = AtticError;
+
+    fn from_str(name: &str) -> AtticResult<Self> {
+        Self::new(name.to_owned())
+    }
+}
+
+impl CacheNamePattern {
+    /// Creates a cache name pattern from a String.
+    pub fn new(pattern: String) -> AtticResult<Self> {
+        validate_cache_name(&pattern, true)?;
+        let matcher = WildMatch::new(&pattern);
+
+        Ok(Self {
+            pattern,
+            matcher: Some(matcher),
+        })
+    }
+
+    /// Tests if the pattern matches a name.
+    pub fn matches(&self, name: &CacheName) -> bool {
+        match &self.matcher {
+            Some(matcher) => matcher.matches(name.as_str()),
+            None => self.pattern == name.as_str(),
+        }
+    }
+}
+
+impl FromStr for CacheNamePattern {
+    type Err = AtticError;
+
+    fn from_str(pattern: &str) -> AtticResult<Self> {
+        Self::new(pattern.to_owned())
+    }
+}
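+
+// For illustration: a pattern holding a wildcard grants access to a whole
+// namespace (the names below are examples only):
+//
+//   let pattern = CacheNamePattern::new("zhaofengli+*".to_string())?;
+//   assert!(pattern.matches(&CacheName::new("zhaofengli+shared".to_string())?));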
+
+impl<'de> Deserialize<'de> for CacheNamePattern {
+    /// Deserializes a potentially-invalid cache name pattern.
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: de::Deserializer<'de>,
+    {
+        use de::Error;
+        let pattern = String::deserialize(deserializer).and_then(|s| {
+            validate_cache_name(&s, true).map_err(|e| Error::custom(e.to_string()))?;
+            Ok(s)
+        })?;
+
+        let matcher = WildMatch::new(&pattern);
+
+        Ok(Self {
+            pattern,
+            matcher: Some(matcher),
+        })
+    }
+}
+
+impl Hash for CacheNamePattern {
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        self.pattern.hash(state);
+    }
+}
+
+impl PartialEq for CacheNamePattern {
+    fn eq(&self, other: &Self) -> bool {
+        self.pattern == other.pattern
+    }
+}
+
+impl Eq for CacheNamePattern {}
+
+fn validate_cache_name(name: &str, allow_wildcards: bool) -> AtticResult<()> {
+    let valid = if allow_wildcards {
+        CACHE_NAME_PATTERN_REGEX.is_match(name)
+    } else {
+        CACHE_NAME_REGEX.is_match(name)
+    };
+
+    if valid {
+        Ok(())
+    } else {
+        Err(AtticError::InvalidCacheName {
+            name: name.to_owned(),
+        })
+    }
+}
+
+#[cfg(test)]
+pub mod tests {
+    use super::*;
+
+    macro_rules! cache {
+        ($n:expr) => {
+            CacheName::new($n.to_string()).unwrap()
+        };
+    }
+
+    pub(crate) use cache;
+
+    #[test]
+    fn test_cache_name() {
+        let names = vec![
+            "valid-name",
+            "Another_Valid_Name",
+            "plan9",
+            "username+cache",
+        ];
+
+        for name in names {
+            assert_eq!(name, CacheName::new(name.to_string()).unwrap().as_str());
+
+            assert_eq!(
+                name,
+                serde_json::from_str::<CacheName>(&format!("\"{}\"", name))
+                    .unwrap()
+                    .as_str(),
+            );
+        }
+
+        let bad_names = vec![
+            "",
+            "not a valid name",
+            "team-*",
+            "这布盒里.webp",
+            "-ers",
+            "and-you-can-have-it-all-my-empire-of-dirt-i-will-let-you-down-i-will-make-you-hurt",
+        ];
+
+        for name in bad_names {
+            CacheName::new(name.to_string()).unwrap_err();
+            serde_json::from_str::<CacheName>(&format!("\"{}\"", name)).unwrap_err();
+        }
+    }
+
+    #[test]
+    fn test_cache_name_pattern() {
+        let pattern = CacheNamePattern::new("team-*".to_string()).unwrap();
+        assert!(pattern.matches(&cache! { "team-" }));
+        assert!(pattern.matches(&cache! { "team-abc" }));
+        assert!(!pattern.matches(&cache! { "abc-team" }));
+
+        let pattern = CacheNamePattern::new("no-wildcard".to_string()).unwrap();
+        assert!(pattern.matches(&cache! { "no-wildcard" }));
+        assert!(!pattern.matches(&cache! { "no-wildcard-xxx" }));
+        assert!(!pattern.matches(&cache! { "xxx-no-wildcard" }));
+
+        let pattern = CacheNamePattern::new("*".to_string()).unwrap();
+        assert!(pattern.matches(&cache! { "literally-anything" }));
+
+        CacheNamePattern::new("*-but-normal-restrictions-still-apply!!!".to_string()).unwrap_err();
+
+        // eq
+        let pattern1 = CacheNamePattern::new("same-pattern".to_string()).unwrap();
+        let pattern2 = CacheNamePattern::new("same-pattern".to_string()).unwrap();
+        assert_eq!(pattern1, pattern2);
+        assert_ne!(pattern, pattern1);
+    }
+}
diff --git a/attic/src/error.rs b/attic/src/error.rs
new file mode 100644
index 0000000..7c5e664
--- /dev/null
+++ b/attic/src/error.rs
@@ -0,0 +1,84 @@
+//! Error handling.
+
+use std::error::Error as StdError;
+use std::io;
+use std::path::PathBuf;
+
+use displaydoc::Display;
+
+pub type AtticResult<T> = Result<T, AtticError>;
+
+/// An error.
+#[derive(Debug, Display)]
+pub enum AtticError {
+    /// Invalid store path {path:?}: {reason}
+    InvalidStorePath { path: PathBuf, reason: &'static str },
+
+    /// Invalid store path base name {base_name:?}: {reason}
+    InvalidStorePathName {
+        base_name: PathBuf,
+        reason: &'static str,
+    },
+
+    /// Invalid store path hash "{hash}": {reason}
+    InvalidStorePathHash { hash: String, reason: &'static str },
+
+    /// Invalid cache name "{name}"
+    InvalidCacheName { name: String },
+
+    /// Signing error: {0}
+    SigningError(super::signing::Error),
+
+    /// Hashing error: {0}
+    HashError(super::hash::Error),
+
+    /// I/O error: {error}.
+    IoError { error: io::Error },
+
+    /// Unknown C++ exception: {exception}.
+    CxxError { exception: String },
+}
+
+impl AtticError {
+    pub fn name(&self) -> &'static str {
+        match self {
+            Self::InvalidStorePath { .. } => "InvalidStorePath",
+            Self::InvalidStorePathName { .. } => "InvalidStorePathName",
+            Self::InvalidStorePathHash { .. } => "InvalidStorePathHash",
+            Self::InvalidCacheName { .. } => "InvalidCacheName",
+            Self::SigningError(_) => "SigningError",
+            Self::HashError(_) => "HashError",
+            Self::IoError { .. } => "IoError",
+            Self::CxxError { .. } => "CxxError",
+        }
+    }
+}
+
+impl StdError for AtticError {}
+
+#[cfg(feature = "nix_store")]
+impl From<cxx::Exception> for AtticError {
+    fn from(exception: cxx::Exception) -> Self {
+        Self::CxxError {
+            exception: exception.what().to_string(),
+        }
+    }
+}
+
+impl From<io::Error> for AtticError {
+    fn from(error: io::Error) -> Self {
+        Self::IoError { error }
+    }
+}
+
+impl From<super::signing::Error> for AtticError {
+    fn from(error: super::signing::Error) -> Self {
+        Self::SigningError(error)
+    }
+}
+
+impl From<super::hash::Error> for AtticError {
+    fn from(error: super::hash::Error) -> Self {
+        Self::HashError(error)
+    }
+}
diff --git a/attic/src/hash/mod.rs b/attic/src/hash/mod.rs
new file mode 100644
index 0000000..33c7c95
--- /dev/null
+++ b/attic/src/hash/mod.rs
@@ -0,0 +1,153 @@
+//! Hashing utilities.
+
+#[cfg(test)]
+mod tests;
+
+use displaydoc::Display;
+use serde::{de, ser, Deserialize, Serialize};
+use sha2::{Digest, Sha256};
+
+use crate::error::AtticResult;
+
+#[cfg(feature = "nix_store")]
+use crate::nix_store::{FfiHash, FfiHashType};
+
+/// A hash.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum Hash {
+    /// An SHA-256 hash.
+    Sha256([u8; 32]),
+}
+
+/// A hashing error.
+#[derive(Debug, Display)]
+pub enum Error {
+    /// The string lacks a colon separator.
+    NoColonSeparator,
+
+    /// Hash algorithm {0} is not supported.
+    UnsupportedHashAlgorithm(String),
+
+    /// Invalid base16 hash: {0}
+    InvalidBase16Hash(hex::FromHexError),
+
+    /// Invalid base32 hash.
+    InvalidBase32Hash,
+
+    /// Invalid length for {typ} string: Must be either {base16_len} (hexadecimal) or {base32_len} (base32), got {actual}.
+    InvalidHashStringLength {
+        typ: &'static str,
+        base16_len: usize,
+        base32_len: usize,
+        actual: usize,
+    },
+}
+
+impl Hash {
+    /// Convenience function to generate a SHA-256 hash from a slice.
+    pub fn sha256_from_bytes(bytes: &[u8]) -> Self {
+        let mut hasher = Sha256::new();
+        hasher.update(bytes);
+        Self::Sha256(hasher.finalize().into())
+    }
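+
+    // For illustration: a SHA-256 hash is 32 bytes, so its typed base16 form
+    // has 64 hex characters and its Nix base32 form has
+    // (32 * 8 - 1) / 5 + 1 = 52 characters. The same hash in both encodings
+    // (taken from the tests):
+    //
+    //   sha256:df3404eaf1481506db9ca155e0a871d5b4d22e62a96961e8bf4ad1a8ca525330
+    //   sha256:0c2kab5ailaapzl62sd9c8pd5d6mf6lf0md1kkdhc5a8y7m08d6z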
+
+    /// Parses a typed representation of a hash.
+    pub fn from_typed(s: &str) -> AtticResult<Self> {
+        let colon = s.find(':').ok_or(Error::NoColonSeparator)?;
+
+        let (typ, rest) = s.split_at(colon);
+        let hash = &rest[1..];
+
+        match typ {
+            "sha256" => {
+                let v = decode_hash(hash, "SHA-256", 32)?;
+                Ok(Self::Sha256(v.try_into().unwrap()))
+            }
+            _ => Err(Error::UnsupportedHashAlgorithm(typ.to_owned()).into()),
+        }
+    }
+
+    /// Returns the hash in Nix-specific Base32 format, with the hash type prepended.
+    pub fn to_typed_base32(&self) -> String {
+        format!("{}:{}", self.hash_type(), self.to_base32())
+    }
+
+    /// Returns the hash in hexadecimal format, with the hash type prepended.
+    ///
+    /// This is the canonical representation of hashes in the Attic database.
+    pub fn to_typed_base16(&self) -> String {
+        format!("{}:{}", self.hash_type(), hex::encode(self.data()))
+    }
+
+    fn data(&self) -> &[u8] {
+        match self {
+            Self::Sha256(d) => d,
+        }
+    }
+
+    fn hash_type(&self) -> &'static str {
+        match self {
+            Self::Sha256(_) => "sha256",
+        }
+    }
+
+    /// Returns the hash in Nix-specific Base32 format.
+    fn to_base32(&self) -> String {
+        nix_base32::to_nix_base32(self.data())
+    }
+
+    #[cfg(feature = "nix_store")]
+    pub(super) fn from_ffi_hash(hash: FfiHash) -> AtticResult<Self> {
+        match hash.type_ {
+            FfiHashType::Sha256 => Ok(Self::Sha256(hash.hash[..32].try_into().unwrap())),
+            typ => Err(Error::UnsupportedHashAlgorithm(typ.as_str().to_owned()).into()),
+        }
+    }
+}
+
+impl<'de> Deserialize<'de> for Hash {
+    /// Deserializes a typed hash string.
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: de::Deserializer<'de>,
+    {
+        use de::Error;
+
+        String::deserialize(deserializer)
+            .and_then(|s| Self::from_typed(&s).map_err(|e| Error::custom(e.to_string())))
+    }
+}
+
+impl Serialize for Hash {
+    /// Serializes a hash into a hexadecimal hash string.
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: ser::Serializer,
+    {
+        serializer.serialize_str(&self.to_typed_base16())
+    }
+}
+
+/// Decodes a base16 or base32 encoded hash containing a specified number of bytes.
+fn decode_hash<'s>(s: &'s str, typ: &'static str, expected_bytes: usize) -> AtticResult<Vec<u8>> {
+    let base16_len = expected_bytes * 2;
+    let base32_len = (expected_bytes * 8 - 1) / 5 + 1;
+
+    let v = if s.len() == base16_len {
+        hex::decode(s).map_err(Error::InvalidBase16Hash)?
+    } else if s.len() == base32_len {
+        nix_base32::from_nix_base32(s).ok_or(Error::InvalidBase32Hash)?
+    } else {
+        return Err(Error::InvalidHashStringLength {
+            typ,
+            base16_len,
+            base32_len,
+            actual: s.len(),
+        }
+        .into());
+    };
+
+    assert!(v.len() == expected_bytes);
+
+    Ok(v)
+}
diff --git a/attic/src/hash/tests/.gitattributes b/attic/src/hash/tests/.gitattributes
new file mode 100644
index 0000000..3a4be7f
--- /dev/null
+++ b/attic/src/hash/tests/.gitattributes
@@ -0,0 +1 @@
+blob -text
diff --git a/attic/src/hash/tests/blob b/attic/src/hash/tests/blob
new file mode 100644
index 0000000..afbfab0
--- /dev/null
+++ b/attic/src/hash/tests/blob
@@ -0,0 +1,15 @@
+⊂_ヽ
+  \\ _
+   \( •_•) F
+    < ⌒ヽ A
+   /   へ\ B
+   /  / \\ U
+   レ ノ   ヽ_つ L
+  / / O
+  / /| U
+ ( (ヽ S
+ | |、\
+ | 丿 \ ⌒)
+ | |  ) /
+`ノ )  Lノ
+(_/
diff --git a/attic/src/hash/tests/mod.rs b/attic/src/hash/tests/mod.rs
new file mode 100644
index 0000000..8bc9453
--- /dev/null
+++ b/attic/src/hash/tests/mod.rs
@@ -0,0 +1,62 @@
+use super::*;
+
+use crate::error::AtticError;
+use crate::nix_store::tests::test_nar;
+
+const BLOB: &[u8] = include_bytes!("blob");
+
+#[test]
+fn test_basic() {
+    let hash = Hash::sha256_from_bytes(BLOB);
+
+    let expected_base16 = "sha256:df3404eaf1481506db9ca155e0a871d5b4d22e62a96961e8bf4ad1a8ca525330";
+    assert_eq!(expected_base16, hash.to_typed_base16());
+
+    let expected_base32 = "sha256:0c2kab5ailaapzl62sd9c8pd5d6mf6lf0md1kkdhc5a8y7m08d6z";
+    assert_eq!(expected_base32, hash.to_typed_base32());
+}
+
+#[test]
+fn test_nar_hash() {
+    let nar = test_nar::NO_DEPS;
+    let hash = Hash::sha256_from_bytes(nar.nar());
+
+    let expected_base32 = "sha256:0hjszid30ak3rkzvc3m94c3risg8wz2hayy100c1fg92bjvvvsms";
+    assert_eq!(expected_base32, hash.to_typed_base32());
+}
+
+#[test]
+fn test_from_typed() {
+    let base16 = "sha256:baeabdb75c223d171800c17b05c5e7e8e9980723a90eb6ffcc632a305afc5a42";
+    let base32 = "sha256:0hjszid30ak3rkzvc3m94c3risg8wz2hayy100c1fg92bjvvvsms";
+
+    assert_eq!(
+        Hash::from_typed(base16).unwrap(),
+        Hash::from_typed(base32).unwrap()
+    );
+
+    assert!(matches!(
+        Hash::from_typed("sha256"),
+        Err(AtticError::HashError(Error::NoColonSeparator))
+    ));
+
+    assert!(matches!(
+        Hash::from_typed("sha256:"),
+        Err(AtticError::HashError(Error::InvalidHashStringLength { .. }))
+    ));
+
+    assert!(matches!(
+        Hash::from_typed("sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"),
+        Err(AtticError::HashError(Error::InvalidBase32Hash))
+    ));
+
+    assert!(matches!(
+        Hash::from_typed("sha256:gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg"),
+        Err(AtticError::HashError(Error::InvalidBase16Hash(_)))
+    ));
+
+    assert!(matches!(
+        Hash::from_typed("md5:invalid"),
+        Err(AtticError::HashError(Error::UnsupportedHashAlgorithm(alg))) if alg == "md5"
+    ));
+}
diff --git a/attic/src/lib.rs b/attic/src/lib.rs
new file mode 100644
index 0000000..ec81bdf
--- /dev/null
+++ b/attic/src/lib.rs
@@ -0,0 +1,29 @@
+//! The Attic Library.
+
+#![deny(
+    asm_sub_register,
+    deprecated,
+    missing_abi,
+    unsafe_code,
+    unused_macros,
+    unused_must_use,
+    unused_unsafe
+)]
+#![deny(clippy::from_over_into, clippy::needless_question_mark)]
+#![cfg_attr(
+    not(debug_assertions),
+    deny(unused_imports, unused_mut, unused_variables,)
+)]
+
+pub mod api;
+pub mod cache;
+pub mod error;
+pub mod hash;
+pub mod mime;
+pub mod nix_store;
+pub mod signing;
+pub mod stream;
+pub mod testing;
+pub mod util;
+
+pub use error::{AtticError, AtticResult};
diff --git a/attic/src/mime.rs b/attic/src/mime.rs
new file mode 100644
index 0000000..fe0903c
--- /dev/null
+++ b/attic/src/mime.rs
@@ -0,0 +1,10 @@
+//! MIME types.
+
+/// /nix-cache-info
+pub const NIX_CACHE_INFO: &str = "text/x-nix-cache-info";
+
+/// .narinfo
+pub const NARINFO: &str = "text/x-nix-narinfo";
+
+/// .nar
+pub const NAR: &str = "application/x-nix-nar";
diff --git a/attic/src/nix_store/README.md b/attic/src/nix_store/README.md
new file mode 100644
index 0000000..68174c8
--- /dev/null
+++ b/attic/src/nix_store/README.md
@@ -0,0 +1,14 @@
+# Nix Store Bindings
+
+This directory contains a set of high-level Rust bindings to `libnixstore`, compatible with `async`/`await` semantics.
+We currently target Nix 2.4+.
+
+## Why?
+
+With this wrapper, you can do things like:
+
+```rust
+let store = NixStore::connect()?;
+let store_path = store.parse_store_path("/nix/store/ia70ss13m22znbl8khrf2hq72qmh5drr-ruby-2.7.5")?;
+let nar_stream = store.nar_from_path(store_path); // AsyncWrite
+```
diff --git a/attic/src/nix_store/bindings/bindgen.hpp b/attic/src/nix_store/bindings/bindgen.hpp
new file mode 100644
index 0000000..2f6f575
--- /dev/null
+++ b/attic/src/nix_store/bindings/bindgen.hpp
@@ -0,0 +1,2 @@
+#include
+#include
diff --git a/attic/src/nix_store/bindings/bindgen.rs b/attic/src/nix_store/bindings/bindgen.rs
new file mode 100644
index 0000000..14efe5b
--- /dev/null
+++ b/attic/src/nix_store/bindings/bindgen.rs
@@ -0,0 +1,37 @@
+//! Generated by `rust-bindgen`.
+//!
+//! We use `rust-bindgen` to generate bindings for a limited set of simpler
+//! structures.
+
+#![allow(
+    dead_code,
+    deref_nullptr,
+    improper_ctypes,
+    non_camel_case_types,
+    non_snake_case,
+    non_upper_case_globals
+)]
+
+include!(concat!(env!("OUT_DIR"), "/bindgen.rs"));
+
+use crate::error::AtticResult;
+use crate::hash::Hash as RustHash;
+
+impl Hash {
+    /// Converts this into the native Rust version of this hash.
+    pub fn into_rust(self) -> AtticResult<RustHash> {
+        RustHash::from_ffi_hash(self)
+    }
+}
+
+impl HashType {
+    /// Returns the identifier of the hashing algorithm.
+    pub fn as_str(&self) -> &'static str {
+        match self {
+            Self::Md5 => "md5",
+            Self::Sha1 => "sha1",
+            Self::Sha256 => "sha256",
+            Self::Sha512 => "sha512",
+        }
+    }
+}
diff --git a/attic/src/nix_store/bindings/mod.rs b/attic/src/nix_store/bindings/mod.rs
new file mode 100644
index 0000000..9f9c364
--- /dev/null
+++ b/attic/src/nix_store/bindings/mod.rs
@@ -0,0 +1,272 @@
+//! `libnixstore` Bindings
+
+mod bindgen;
+
+use std::cell::UnsafeCell;
+use std::io;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+use cxx::{type_id, ExternType};
+use futures::stream::{Stream, StreamExt};
+use tokio::io::{AsyncWrite, AsyncWriteExt};
+
+use crate::{AtticError, AtticResult};
+
+pub use bindgen::{Hash as FfiHash, HashType as FfiHashType};
+
+unsafe impl ExternType for FfiHash {
+    type Id = type_id!("nix::Hash");
+    type Kind = cxx::kind::Trivial;
+}
+
+unsafe impl ExternType for FfiHashType {
+    type Id = type_id!("nix::HashType");
+    type Kind = cxx::kind::Trivial;
+}
+
+// The C++ implementation takes care of concurrency
+#[repr(transparent)]
+pub struct FfiNixStore(UnsafeCell<cxx::UniquePtr<ffi::CNixStore>>);
+
+unsafe impl Send for FfiNixStore {}
+unsafe impl Sync for FfiNixStore {}
+
+impl FfiNixStore {
+    pub fn store<'a>(&'a self) -> Pin<&'a mut ffi::CNixStore> {
+        unsafe {
+            let ptr = self.0.get().as_mut().unwrap();
+            ptr.pin_mut()
+        }
+    }
+}
+
+/// Obtain a handle to the Nix store.
+pub unsafe fn open_nix_store() -> AtticResult<FfiNixStore> {
+    match ffi::open_nix_store() {
+        Ok(ptr) => {
+            let cell = UnsafeCell::new(ptr);
+            Ok(FfiNixStore(cell))
+        }
+        Err(e) => Err(e.into()),
+    }
+}
+
+// TODO: Benchmark different implementations
+// (tokio, crossbeam, flume)
+mod mpsc {
+    // Tokio
+    pub use tokio::sync::mpsc::{
+        error::SendError, unbounded_channel, UnboundedReceiver, UnboundedSender,
+    };
+}
+
+/// Async write request.
+#[derive(Debug)]
+enum AsyncWriteMessage {
+    Data(Vec<u8>),
+    Error(String),
+    Eof,
+}
+
+/// Async write request sender.
+#[derive(Clone)]
+pub struct AsyncWriteSender {
+    sender: mpsc::UnboundedSender<AsyncWriteMessage>,
+}
+
+impl AsyncWriteSender {
+    fn send(&mut self, data: &[u8]) -> Result<(), mpsc::SendError<AsyncWriteMessage>> {
+        let message = AsyncWriteMessage::Data(Vec::from(data));
+        self.sender.send(message)
+    }
+
+    fn eof(&mut self) -> Result<(), mpsc::SendError<AsyncWriteMessage>> {
+        let message = AsyncWriteMessage::Eof;
+        self.sender.send(message)
+    }
+
+    pub(crate) fn rust_error(
+        &mut self,
+        error: impl std::error::Error,
+    ) -> Result<(), impl std::error::Error> {
+        let message = AsyncWriteMessage::Error(error.to_string());
+        self.sender.send(message)
+    }
+}
+
+/// A wrapper of the `AsyncWrite` trait for the synchronous Nix C++ land.
+pub struct AsyncWriteAdapter {
+    receiver: mpsc::UnboundedReceiver<AsyncWriteMessage>,
+    eof: bool,
+}
+
+impl AsyncWriteAdapter {
+    pub fn new() -> (Self, Box<AsyncWriteSender>) {
+        let (sender, receiver) = mpsc::unbounded_channel();
+
+        let r = Self {
+            receiver,
+            eof: false,
+        };
+        let sender = Box::new(AsyncWriteSender { sender });
+
+        (r, sender)
+    }
+
+    /// Write everything the sender sends to us.
+    pub async fn write_all(mut self, mut writer: Box<dyn AsyncWrite + Unpin>) -> AtticResult<()> {
+        let writer = writer.as_mut();
+
+        while let Some(data) = self.next().await {
+            match data {
+                Ok(v) => {
+                    writer.write_all(&v).await?;
+                }
+                Err(e) => {
+                    return Err(e);
+                }
+            }
+        }
+
+        if !self.eof {
+            Err(io::Error::from(io::ErrorKind::BrokenPipe).into())
+        } else {
+            Ok(())
+        }
+    }
+}
+
+impl Stream for AsyncWriteAdapter {
+    type Item = AtticResult<Vec<u8>>;
+
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        match self.receiver.poll_recv(cx) {
+            Poll::Pending => Poll::Pending,
+            Poll::Ready(Some(message)) => {
+                use AsyncWriteMessage::*;
+                match message {
+                    Data(v) => Poll::Ready(Some(Ok(v))),
+                    Error(exception) => {
+                        let error = AtticError::CxxError { exception };
+                        Poll::Ready(Some(Err(error)))
+                    }
+                    Eof => {
+                        self.eof = true;
+                        Poll::Ready(None)
+                    }
+                }
+            }
+            Poll::Ready(None) => {
+                if !self.eof {
+                    Poll::Ready(Some(Err(io::Error::from(io::ErrorKind::BrokenPipe).into())))
+                } else {
+                    Poll::Ready(None)
+                }
+            }
+        }
+    }
+}
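+
+// For illustration, the intended flow (a sketch; the real caller is
+// `nar_from_path` in nix_store.rs):
+//
+//   let (adapter, sender) = AsyncWriteAdapter::new();
+//   tokio::task::spawn_blocking(move || {
+//       // ...the C++ side pushes Data/Eof messages through `sender`...
+//   });
+//   adapter.write_all(writer).await?;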
+
+#[cxx::bridge]
+/// Generated by `cxx.rs`.
+///
+/// Mid-level wrapper of `libnixstore` implemented in C++.
+mod ffi {
+    extern "Rust" {
+        type AsyncWriteSender;
+        fn send(self: &mut AsyncWriteSender, data: &[u8]) -> Result<()>;
+        fn eof(self: &mut AsyncWriteSender) -> Result<()>;
+    }
+
+    unsafe extern "C++" {
+        include!("attic/src/nix_store/bindings/nix.hpp");
+
+        #[namespace = "nix"]
+        type Hash = super::FfiHash;
+
+        // =========
+        // CNixStore
+        // =========
+
+        /// Mid-level wrapper for the Unix Domain Socket Nix Store.
+        type CNixStore;
+
+        /// Returns the path of the Nix store itself.
+        fn store_dir(self: Pin<&mut CNixStore>) -> String;
+
+        /*
+        /// Verifies that a path is indeed in the Nix store, then return the base store path.
+        ///
+        /// Use parse_store_path instead.
+        fn to_store_path(self: Pin<&mut CNixStore>, path: &str) -> Result<String>;
+        */
+
+        /// Queries information about a valid path.
+        fn query_path_info(
+            self: Pin<&mut CNixStore>,
+            store_path: &[u8],
+        ) -> Result<UniquePtr<CPathInfo>>;
+
+        /// Computes the closure of a valid path.
+        ///
+        /// If `flip_direction` is true, the set of paths that can reach `store_path` is
+        /// returned.
+        fn compute_fs_closure(
+            self: Pin<&mut CNixStore>,
+            store_path: &[u8],
+            flip_direction: bool,
+            include_outputs: bool,
+            include_derivers: bool,
+        ) -> Result<UniquePtr<CxxVector<CxxString>>>;
+
+        /// Computes the closure of a list of valid paths.
+        ///
+        /// This is the multi-path variant of `compute_fs_closure`.
+        /// If `flip_direction` is true, the set of paths that can reach `store_path` is
+        /// returned.
+        ///
+        /// It's easier and more efficient to just pass a vector of slices
+        /// instead of wrangling with concrete "extern rust" / "extern C++"
+        /// types.
+        fn compute_fs_closure_multi(
+            self: Pin<&mut CNixStore>,
+            base_names: &[&[u8]],
+            flip_direction: bool,
+            include_outputs: bool,
+            include_derivers: bool,
+        ) -> Result<UniquePtr<CxxVector<CxxString>>>;
+
+        /// Creates a NAR dump from a path.
+        fn nar_from_path(
+            self: Pin<&mut CNixStore>,
+            base_name: Vec<u8>,
+            sender: Box<AsyncWriteSender>,
+        ) -> Result<()>;
+
+        /// Obtains a handle to the Nix store.
+        fn open_nix_store() -> Result<UniquePtr<CNixStore>>;
+
+        // =========
+        // CPathInfo
+        // =========
+
+        /// Mid-level wrapper for the `nix::ValidPathInfo` struct.
+        type CPathInfo;
+
+        /// Returns the NAR hash of the store path.
+        fn nar_hash(self: Pin<&mut CPathInfo>) -> Hash;
+
+        /// Returns the size of the NAR.
+        fn nar_size(self: Pin<&mut CPathInfo>) -> u64;
+
+        /// Returns the references of the store path.
+        fn references(self: Pin<&mut CPathInfo>) -> UniquePtr<CxxVector<CxxString>>;
+
+        /// Returns the possibly invalid signatures attached to the store path.
+        fn sigs(self: Pin<&mut CPathInfo>) -> UniquePtr<CxxVector<CxxString>>;
+
+        /// Returns the CA field of the store path.
+        fn ca(self: Pin<&mut CPathInfo>) -> String;
+    }
+}
diff --git a/attic/src/nix_store/bindings/nix.cpp b/attic/src/nix_store/bindings/nix.cpp
new file mode 100644
index 0000000..e63a55e
--- /dev/null
+++ b/attic/src/nix_store/bindings/nix.cpp
@@ -0,0 +1,133 @@
+// C++ side of the libnixstore glue.
+//
+// We implement a mid-level wrapper of the Nix Store interface,
+// which is then wrapped again in the Rust side to enable full
+// async-await operation.
+//
+// Here we stick with the naming conventions of Rust and handle
+// Rust types directly where possible, so that the interfaces are
+// satisfying to use from the Rust side via cxx.rs.
+
+#include "attic/src/nix_store/bindings/nix.hpp"
+
+static nix::StorePath store_path_from_rust(RBasePathSlice base_name) {
+    std::string_view sv((const char *)base_name.data(), base_name.size());
+    return nix::StorePath(sv);
+}
+
+// ========
+// RustSink
+// ========
+
+RustSink::RustSink(RBox<AsyncWriteSender> sender) : sender(std::move(sender)) {}
+
+void RustSink::operator () (std::string_view data) {
+    RBasePathSlice s((const unsigned char *)data.data(), data.size());
+
+    this->sender->send(s);
+}
+
+void RustSink::eof() {
+    this->sender->eof();
+}
+
+
+// =========
+// CPathInfo
+// =========
+
+CPathInfo::CPathInfo(nix::ref<const nix::ValidPathInfo> pi) : pi(pi) {}
+
+nix::Hash CPathInfo::nar_hash() {
+    return this->pi->narHash;
+}
+
+uint64_t CPathInfo::nar_size() {
+    return this->pi->narSize;
+}
+
+std::unique_ptr<std::vector<std::string>> CPathInfo::sigs() {
+    std::vector<std::string> result;
+    for (auto&& elem : this->pi->sigs) {
+        result.push_back(std::string(elem));
+    }
+    return std::make_unique<std::vector<std::string>>(result);
+}
+
+std::unique_ptr<std::vector<std::string>> CPathInfo::references() {
+    std::vector<std::string> result;
+    for (auto&& elem : this->pi->references) {
+        result.push_back(std::string(elem.to_string()));
+    }
+    return std::make_unique<std::vector<std::string>>(result);
+}
+
+RString CPathInfo::ca() {
+    if (this->pi->ca) {
+        return RString(nix::renderContentAddress(this->pi->ca));
+    } else {
+        return RString("");
+    }
+}
+
+// =========
+// CNixStore
+// =========
+
+CNixStore::CNixStore() {
+    std::map<std::string, std::string> params;
+    this->store = nix::openStore("auto", params);
+}
+
+RString CNixStore::store_dir() {
+    return RString(this->store->storeDir);
+}
+
+std::unique_ptr<CPathInfo> CNixStore::query_path_info(RBasePathSlice base_name) {
+    auto store_path = store_path_from_rust(base_name);
+
+    auto r = this->store->queryPathInfo(store_path);
+    return std::make_unique<CPathInfo>(r);
+}
+
+std::unique_ptr<std::vector<std::string>> CNixStore::compute_fs_closure(RBasePathSlice base_name, bool flip_direction, bool include_outputs, bool include_derivers) {
+    std::set<nix::StorePath> out;
+
+    this->store->computeFSClosure(store_path_from_rust(base_name), out, flip_direction, include_outputs, include_derivers);
+
+    std::vector<std::string> result;
+    for (auto&& elem : out) {
+        result.push_back(std::string(elem.to_string()));
+    }
+    return std::make_unique<std::vector<std::string>>(result);
+}
+
+std::unique_ptr<std::vector<std::string>> CNixStore::compute_fs_closure_multi(RSlice<RBasePathSlice> base_names, bool flip_direction, bool include_outputs, bool include_derivers) {
+    std::set<nix::StorePath> path_set, out;
+    for (auto&& base_name : base_names) {
+        path_set.insert(store_path_from_rust(base_name));
+    }
+
+    this->store->computeFSClosure(path_set, out, flip_direction, include_outputs, include_derivers);
+
+    std::vector<std::string> result;
+    for (auto&& elem : out) {
+        result.push_back(std::string(elem.to_string()));
+    }
+    return std::make_unique<std::vector<std::string>>(result);
+}
+
+void CNixStore::nar_from_path(RVec<unsigned char> base_name, RBox<AsyncWriteSender> sender) {
+    RustSink sink(std::move(sender));
+
+    std::string_view sv((const char *)base_name.data(), base_name.size());
+    nix::StorePath store_path(sv);
+
+    // exceptions will be thrown into Rust
+    this->store->narFromPath(store_path, sink);
+    sink.eof();
+}
+
+std::unique_ptr<CNixStore> open_nix_store() {
+    return std::make_unique<CNixStore>();
+}
diff --git a/attic/src/nix_store/bindings/nix.hpp b/attic/src/nix_store/bindings/nix.hpp
new file mode 100644
index 0000000..9fdab63
--- /dev/null
+++ b/attic/src/nix_store/bindings/nix.hpp
@@ -0,0 +1,77 @@
+// C++ side of the libnixstore glue.
+//
+// We implement a mid-level wrapper of the Nix Store interface,
+// which is then wrapped again in the Rust side to enable full
+// async-await operation.
+//
+// Here we stick with the naming conventions of Rust and handle
+// Rust types directly where possible, so that the interfaces are
+// satisfying to use from the Rust side via cxx.rs.
+
+#pragma once
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+template <typename T> using RVec = rust::Vec<T>;
+template <typename T> using RBox = rust::Box<T>;
+template <typename T> using RSlice = rust::Slice<T>;
+using RString = rust::String;
+using RStr = rust::Str;
+using RBasePathSlice = RSlice<const unsigned char>;
+
+struct AsyncWriteSender;
+
+struct RustSink : nix::Sink
+{
+    RBox<AsyncWriteSender> sender;
+public:
+    RustSink(RBox<AsyncWriteSender> sender);
+    void operator () (std::string_view data) override;
+    void eof();
+};
+
+// Opaque wrapper for nix::ValidPathInfo
+class CPathInfo {
+    nix::ref<const nix::ValidPathInfo> pi;
+public:
+    CPathInfo(nix::ref<const nix::ValidPathInfo> pi);
+    nix::Hash nar_hash();
+    uint64_t nar_size();
+    std::unique_ptr<std::vector<std::string>> sigs();
+    std::unique_ptr<std::vector<std::string>> references();
+    RString ca();
+};
+
+class CNixStore {
+    std::shared_ptr<nix::Store> store;
+public:
+    CNixStore();
+
+    RString store_dir();
+    std::unique_ptr<CPathInfo> query_path_info(RBasePathSlice base_name);
+    std::unique_ptr<std::vector<std::string>> compute_fs_closure(
+        RBasePathSlice base_name,
+        bool flip_direction,
+        bool include_outputs,
+        bool include_derivers);
+    std::unique_ptr<std::vector<std::string>> compute_fs_closure_multi(
+        RSlice<RBasePathSlice> base_names,
+        bool flip_direction,
+        bool include_outputs,
+        bool include_derivers);
+    void nar_from_path(RVec<unsigned char> base_name, RBox<AsyncWriteSender> sender);
+};
+
+std::unique_ptr<CNixStore> open_nix_store();
+
+// Relies on our definitions
+#include "attic/src/nix_store/bindings/mod.rs.h"
diff --git a/attic/src/nix_store/mod.rs b/attic/src/nix_store/mod.rs
new file mode 100644
index 0000000..ce4606d
--- /dev/null
+++ b/attic/src/nix_store/mod.rs
@@ -0,0 +1,298 @@
+//! Nix store operations.
+//!
+//! ## FFI Bindings
+//!
+//! For now, the FFI bindings are for use in the client. We never
+//! interact with the Nix store on the server. When the `nix_store`
+//! crate feature is disabled, native Rust portions of this module
+//! will still function.
+//!
+//! We use `libnixstore` to carry out most of the operations.
+//! To interface with `libnixstore`, we first construct a simpler,
+//! FFI-friendly API in C++ and then integrate with it using [cxx](https://cxx.rs)
+//! and [rust-bindgen](https://rust-lang.github.io/rust-bindgen).
+//! The glue interface is mostly object-oriented, with no pesky
+//! C-style OOP functions or manual lifetime tracking.
+//!
+//! The C++-side code is responsible for translating the calls
+//! into actual `libnixstore` invocations which are version-specific
+//! (we target Nix 2.4 and 2.5).
+//!
+//! We have the following goals:
+//! - Retrieval of store path information
+//! - Computation of closures
+//! - Streaming of NAR archives
+//! - Fully `async`/`await` API with support for concurrency
+//!
+//! ## Alternatives?
+//!
+//! The Nix source tree includes [`nix-rust`](https://github.com/NixOS/nix/tree/master/nix-rust)
+//! which contains a limited implementation of various store operations.
+//! It [used to](https://github.com/NixOS/nix/commit/bbe97dff8b3054d96e758f486f9ce3fa09e64de3)
+//! contain an implementation of `StorePath` in Rust which was used from C++
+//! via FFI. It was [removed](https://github.com/NixOS/nix/commit/759947bf72c134592f0ce23d385e48095bd0a301)
+//! half a year later due to memory consumption concerns. The current
+//! `nix-rust` contains a set of `libnixstore` bindings, but they are low-level
+//! and suffering from bitrot.
+//!
+//! For easier FFI, there is an attempt to make a C wrapper for `libnixstore` called
+//! [libnixstore-c](https://github.com/andir/libnixstore-c). It offers a
+//! very limited amount of functionality.
+
+#[cfg(feature = "nix_store")]
+#[allow(unsafe_code)]
+mod bindings;
+
+#[cfg(feature = "nix_store")]
+mod nix_store;
+
+use std::ffi::OsStr;
+use std::os::unix::ffi::OsStrExt;
+use std::path::{Path, PathBuf};
+
+use lazy_static::lazy_static;
+use regex::Regex;
+use serde::{de, Deserialize, Serialize};
+
+use crate::error::{AtticError, AtticResult};
+use crate::hash::Hash;
+
+#[cfg(feature = "nix_store")]
+pub use bindings::{FfiHash, FfiHashType};
+
+#[cfg(feature = "nix_store")]
+pub use nix_store::NixStore;
+
+#[cfg(test)]
+pub mod tests;
+
+/// Length of the hash in a store path.
+pub const STORE_PATH_HASH_LEN: usize = 32;
+
+/// Regex that matches a store path hash, without anchors.
+pub const STORE_PATH_HASH_REGEX_FRAGMENT: &str = "[0123456789abcdfghijklmnpqrsvwxyz]{32}";
+
+lazy_static! {
+    /// Regex for a valid store path hash.
+    ///
+    /// This is the hash portion of a base name.
+    static ref STORE_PATH_HASH_REGEX: Regex = {
+        Regex::new(&format!("^{}$", STORE_PATH_HASH_REGEX_FRAGMENT)).unwrap()
+    };
+
+    /// Regex for a valid store base name.
+    ///
+    /// A base name consists of two parts: A hash and a human-readable
+    /// label/name. The format of the hash is described in `StorePathHash`.
+    ///
+    /// The human-readable name can only contain the following characters:
+    ///
+    /// - A-Za-z0-9
+    /// - `+-._?=`
+    ///
+    /// See the Nix implementation in `src/libstore/path.cc`.
+    static ref STORE_BASE_NAME_REGEX: Regex = {
+        Regex::new(r"^[0123456789abcdfghijklmnpqrsvwxyz]{32}-[A-Za-z0-9+-._?=]+$").unwrap()
+    };
+}
+
+/// A path in a Nix store.
+///
+/// This must be a direct child of the store. This path may or
+/// may not actually exist.
+///
+/// This guarantees that the base name is of valid format.
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
+pub struct StorePath {
+    /// Base name of the store path.
+    ///
+    /// For example, for `/nix/store/ia70ss13m22znbl8khrf2hq72qmh5drr-ruby-2.7.5`,
+    /// this would be `ia70ss13m22znbl8khrf2hq72qmh5drr-ruby-2.7.5`.
+    base_name: PathBuf,
+}
+
+/// A fixed-length store path hash.
+///
+/// For example, for `/nix/store/ia70ss13m22znbl8khrf2hq72qmh5drr-ruby-2.7.5`,
+/// this would be `ia70ss13m22znbl8khrf2hq72qmh5drr`.
+///
+/// It must contain exactly 32 "base-32 characters". Nix's special scheme
+/// includes the following valid characters: "0123456789abcdfghijklmnpqrsvwxyz"
+/// ('e', 'o', 'u', 't' are banned).
+///
+/// Examples of invalid store path hashes:
+///
+/// - "eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"
+/// - "IA70SS13M22ZNBL8KHRF2HQ72QMH5DRR"
+/// - "whatevenisthisthing"
+#[derive(Debug, Clone, Hash, PartialEq, Eq, Serialize)]
+pub struct StorePathHash(String);
+
+/// Information on a valid store path.
+#[derive(Debug)]
+pub struct ValidPathInfo {
+    /// The store path.
+    pub path: StorePath,
+
+    /// Hash of the NAR.
+    pub nar_hash: Hash,
+
+    /// Size of the NAR.
+    pub nar_size: u64,
+
+    /// References.
+    ///
+    /// This list only contains base names of the paths.
+    pub references: Vec<PathBuf>,
+
+    /// Signatures.
+    pub sigs: Vec<String>,
+
+    /// Content Address.
+    pub ca: Option<String>,
+}
+
+#[cfg_attr(not(feature = "nix_store"), allow(dead_code))]
+impl StorePath {
+    /// Creates a StorePath with a base name.
+    fn from_base_name(base_name: PathBuf) -> AtticResult<Self> {
+        let s = base_name
+            .as_os_str()
+            .to_str()
+            .ok_or_else(|| AtticError::InvalidStorePathName {
+                base_name: base_name.clone(),
+                reason: "Name contains non-UTF-8 characters",
+            })?;
+
+        if !STORE_BASE_NAME_REGEX.is_match(s) {
+            return Err(AtticError::InvalidStorePathName {
+                base_name,
+                reason: "Name is of invalid format",
+            });
+        }
+
+        Ok(Self { base_name })
+    }
+
+    /// Creates a StorePath with a known valid base name.
+    ///
+    /// # Safety
+    ///
+    /// The caller must ensure that the name is of a valid format (refer
+    /// to the documentation for `STORE_BASE_NAME_REGEX`). Other operations
+    /// with this object will assume it's valid.
+    #[allow(unsafe_code)]
+    unsafe fn from_base_name_unchecked(base_name: PathBuf) -> Self {
+        Self { base_name }
+    }
+
+    /// Gets the hash portion of the store path.
+    pub fn to_hash(&self) -> StorePathHash {
+        // Safety: We have already validated the format of the base name,
+        // including the hash part. The name is guaranteed valid UTF-8.
+        #[allow(unsafe_code)]
+        unsafe {
+            let s = std::str::from_utf8_unchecked(self.base_name.as_os_str().as_bytes());
+            let hash = s[..STORE_PATH_HASH_LEN].to_string();
+            StorePathHash::new_unchecked(hash)
+        }
+    }
+
+    /// Returns the human-readable name.
+    pub fn name(&self) -> String {
+        // Safety: Already checked
+        #[allow(unsafe_code)]
+        unsafe {
+            let s = std::str::from_utf8_unchecked(self.base_name.as_os_str().as_bytes());
+            s[STORE_PATH_HASH_LEN + 1..].to_string()
+        }
+    }
+
+    pub fn as_os_str(&self) -> &OsStr {
+        self.base_name.as_os_str()
+    }
+
+    #[cfg_attr(not(feature = "nix_store"), allow(dead_code))]
+    fn as_base_name_bytes(&self) -> &[u8] {
+        self.base_name.as_os_str().as_bytes()
+    }
+}
+
+impl StorePathHash {
+    /// Creates a store path hash from a string.
+    pub fn new(hash: String) -> AtticResult<Self> {
+        if hash.as_bytes().len() != STORE_PATH_HASH_LEN {
+            return Err(AtticError::InvalidStorePathHash {
+                hash,
+                reason: "Hash is of invalid length",
+            });
+        }
+
+        if !STORE_PATH_HASH_REGEX.is_match(&hash) {
+            return Err(AtticError::InvalidStorePathHash {
+                hash,
+                reason: "Hash is of invalid format",
+            });
+        }
+
+        Ok(Self(hash))
+    }
+
+    /// Creates a store path hash from a string, without checking its validity.
+    ///
+    /// # Safety
+    ///
+    /// The caller must make sure that it is of expected length and format.
+    #[allow(unsafe_code)]
+    pub unsafe fn new_unchecked(hash: String) -> Self {
+        Self(hash)
+    }
+
+    pub fn as_str(&self) -> &str {
+        &self.0
+    }
+
+    pub fn to_string(&self) -> String {
+        self.0.clone()
+    }
+}
+
+impl<'de> Deserialize<'de> for StorePathHash {
+    /// Deserializes a potentially-invalid store path hash.
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: de::Deserializer<'de>,
+    {
+        use de::Error;
+        String::deserialize(deserializer)
+            .and_then(|s| Self::new(s).map_err(|e| Error::custom(e.to_string())))
+    }
+}
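+
+// For illustration: with a store directory of `/nix/store`, the path
+//   /nix/store/ia70ss13m22znbl8khrf2hq72qmh5drr-ruby-2.7.5/bin/ruby
+// has the base name
+//   ia70ss13m22znbl8khrf2hq72qmh5drr-ruby-2.7.5
+// whose first 32 characters form the store path hash.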
+#[cfg_attr(not(feature = "nix_store"), allow(dead_code))] +fn to_base_name(store_dir: &Path, path: &Path) -> AtticResult { + if let Ok(remaining) = path.strip_prefix(store_dir) { + let first = remaining + .iter() + .next() + .ok_or_else(|| AtticError::InvalidStorePath { + path: path.to_owned(), + reason: "Path is store directory itself", + })?; + + if first.len() < STORE_PATH_HASH_LEN { + Err(AtticError::InvalidStorePath { + path: path.to_owned(), + reason: "Path is too short", + }) + } else { + Ok(PathBuf::from(first)) + } + } else { + Err(AtticError::InvalidStorePath { + path: path.to_owned(), + reason: "Path is not in store directory", + }) + } +} diff --git a/attic/src/nix_store/nix_store.rs b/attic/src/nix_store/nix_store.rs new file mode 100644 index 0000000..0561b07 --- /dev/null +++ b/attic/src/nix_store/nix_store.rs @@ -0,0 +1,236 @@ +//! High-level Nix Store interface. + +use std::ffi::OsStr; +use std::os::unix::ffi::OsStrExt; +use std::path::{Path, PathBuf}; +use std::sync::Arc; + +use tokio::task::spawn_blocking; + +use super::bindings::{open_nix_store, AsyncWriteAdapter, FfiNixStore}; +use super::{to_base_name, StorePath, ValidPathInfo}; +use crate::error::AtticResult; + +/// High-level wrapper for the Unix Domain Socket Nix Store. +pub struct NixStore { + /// The Nix store FFI. + inner: Arc, + + /// Path to the Nix store itself. + store_dir: PathBuf, +} + +#[cfg(feature = "nix_store")] +impl NixStore { + pub fn connect() -> AtticResult { + #[allow(unsafe_code)] + let inner = unsafe { open_nix_store()? }; + let store_dir = PathBuf::from(inner.store().store_dir()); + + Ok(Self { + inner: Arc::new(inner), + store_dir, + }) + } + + /// Returns the Nix store directory. + pub fn store_dir(&self) -> &Path { + &self.store_dir + } + + /// Returns the base store path of a path, following any symlinks. + /// + /// This is a simple wrapper over `parse_store_path` that also + /// follows symlinks. + pub fn follow_store_path>(&self, path: P) -> AtticResult { + // Some cases to consider: + // + // - `/nix/store/eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee-nixos-system-x/sw` (a symlink to sw) + // - `eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee-nixos-system-x` + // - We don't resolve the `sw` symlink since the full store path is specified + // (this is a design decision) + // - `/run/current-system` (a symlink to profile) + // - `eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee-nixos-system-x` + // - `/run/current-system/` (with a trailing slash) + // - `eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee-nixos-system-x` + // - `/run/current-system/sw` (a symlink to sw) + // - `eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee-system-path` (!) + let path = path.as_ref(); + if path.strip_prefix(&self.store_dir).is_ok() { + // Is in the store - directly strip regardless of being a symlink or not + self.parse_store_path(path) + } else { + // Canonicalize then parse + let canon = path.canonicalize()?; + self.parse_store_path(canon) + } + } + + /// Returns the base store path of a path. + /// + /// This function does not validate whether the path is actually in the + /// Nix store or not. + /// + /// The path must be under the store directory. See `follow_store_path` + /// for an alternative that follows symlinks. + pub fn parse_store_path>(&self, path: P) -> AtticResult { + let base_name = to_base_name(&self.store_dir, path.as_ref())?; + StorePath::from_base_name(base_name) + } + + /// Returns the full path for a base store path. 
+ pub fn get_full_path(&self, store_path: &StorePath) -> PathBuf { + self.store_dir.join(&store_path.base_name) + } + + /// Creates a NAR archive from a path. + /// + /// This is akin to `nix-store --dump`. + pub fn nar_from_path(&self, store_path: StorePath) -> AsyncWriteAdapter { + let inner = self.inner.clone(); + let (adapter, mut sender) = AsyncWriteAdapter::new(); + let base_name = Vec::from(store_path.as_base_name_bytes()); + + spawn_blocking(move || { + // Send all exceptions through the channel, and ignore errors + // during sending (the channel may have been closed). + if let Err(e) = inner.store().nar_from_path(base_name, sender.clone()) { + let _ = sender.rust_error(e); + } + }); + + adapter + } + + /// Returns the closure of a valid path. + /// + /// If `flip_directions` is true, the set of paths that can reach `store_path` is + /// returned. + pub async fn compute_fs_closure( + &self, + store_path: StorePath, + flip_directions: bool, + include_outputs: bool, + include_derivers: bool, + ) -> AtticResult> { + let inner = self.inner.clone(); + + spawn_blocking(move || { + let base_name = store_path.as_base_name_bytes(); + + let cxx_vector = inner.store().compute_fs_closure( + base_name, + flip_directions, + include_outputs, + include_derivers, + )?; + + Ok(cxx_vector + .iter() + .map(|s| { + let osstr = OsStr::from_bytes(s.as_bytes()); + let pb = PathBuf::from(osstr); + + // Safety: The C++ implementation already checks the StorePath + // for correct format (which also implies valid UTF-8) + #[allow(unsafe_code)] + unsafe { + StorePath::from_base_name_unchecked(pb) + } + }) + .collect()) + }) + .await + .unwrap() + } + + /// Returns the closure of a set of valid paths. + /// + /// This is the multi-path variant of `compute_fs_closure`. + /// If `flip_directions` is true, the set of paths that can reach `store_path` is + /// returned. + pub async fn compute_fs_closure_multi( + &self, + store_paths: Vec, + flip_directions: bool, + include_outputs: bool, + include_derivers: bool, + ) -> AtticResult> { + let inner = self.inner.clone(); + + spawn_blocking(move || { + let plain_base_names: Vec<&[u8]> = store_paths + .iter() + .map(|sp| sp.as_base_name_bytes()) + .collect(); + + let cxx_vector = inner.store().compute_fs_closure_multi( + &plain_base_names, + flip_directions, + include_outputs, + include_derivers, + )?; + + Ok(cxx_vector + .iter() + .map(|s| { + let osstr = OsStr::from_bytes(s.as_bytes()); + let pb = PathBuf::from(osstr); + + // Safety: The C++ implementation already checks the StorePath + // for correct format (which also implies valid UTF-8) + #[allow(unsafe_code)] + unsafe { + StorePath::from_base_name_unchecked(pb) + } + }) + .collect()) + }) + .await + .unwrap() + } + + /// Returns detailed information on a path. 
+ pub async fn query_path_info(&self, store_path: StorePath) -> AtticResult { + let inner = self.inner.clone(); + + spawn_blocking(move || { + let base_name = store_path.as_base_name_bytes(); + let mut c_path_info = inner.store().query_path_info(base_name)?; + + // FIXME: Make this more ergonomic and efficient + let nar_size = c_path_info.pin_mut().nar_size(); + let nar_hash = c_path_info.pin_mut().nar_hash(); + let references = c_path_info + .pin_mut() + .references() + .iter() + .map(|s| { + let osstr = OsStr::from_bytes(s.as_bytes()); + PathBuf::from(osstr) + }) + .collect(); + let sigs = c_path_info + .pin_mut() + .sigs() + .iter() + .map(|s| { + let osstr = OsStr::from_bytes(s.as_bytes()); + osstr.to_str().unwrap().to_string() + }) + .collect(); + let ca = c_path_info.pin_mut().ca(); + + Ok(ValidPathInfo { + path: store_path, + nar_size, + nar_hash: nar_hash.into_rust()?, + references, + sigs, + ca: if ca.is_empty() { None } else { Some(ca) }, + }) + }) + .await + .unwrap() + } +} diff --git a/attic/src/nix_store/tests/.gitattributes b/attic/src/nix_store/tests/.gitattributes new file mode 100644 index 0000000..fa1385d --- /dev/null +++ b/attic/src/nix_store/tests/.gitattributes @@ -0,0 +1 @@ +* -text diff --git a/attic/src/nix_store/tests/README.md b/attic/src/nix_store/tests/README.md new file mode 100644 index 0000000..76e847a --- /dev/null +++ b/attic/src/nix_store/tests/README.md @@ -0,0 +1,14 @@ +# Tests + +The included tests require trusted user access to import the test NAR dumps. + +## Test Derivations + +To keep things minimal, we have a couple of polyglot derivations that double as their builders in `drv`. +They result in the following store paths when built: + +- `no-deps.nix` -> `/nix/store/nm1w9sdm6j6icmhd2q3260hl1w9zj6li-attic-test-no-deps` +- `with-deps.nix` -> `/nix/store/7wp86qa87v2pwh6sr2a02qci0h71rs9z-attic-test-with-deps` + +NAR dumps for those store paths are included in `nar`. +`.nar` files are produced by `nix-store --export`, and `.export` files are produced by `nix-store --export`. diff --git a/attic/src/nix_store/tests/drv/no-deps.nix b/attic/src/nix_store/tests/drv/no-deps.nix new file mode 100755 index 0000000..8e3f7a4 --- /dev/null +++ b/attic/src/nix_store/tests/drv/no-deps.nix @@ -0,0 +1,7 @@ +#!/bin/sh +/*/sh -c "echo Hi! I have no dependencies. > $out"; exit 0; */ +derivation { + name = "attic-test-no-deps"; + builder = ./no-deps.nix; + system = "x86_64-linux"; +} diff --git a/attic/src/nix_store/tests/drv/with-deps.nix b/attic/src/nix_store/tests/drv/with-deps.nix new file mode 100755 index 0000000..ecca3fb --- /dev/null +++ b/attic/src/nix_store/tests/drv/with-deps.nix @@ -0,0 +1,21 @@ +#!/bin/sh +/*/sh -c "echo Hi! I depend on $dep. 
> $out"; exit 0; */ +let + a = derivation { + name = "attic-test-with-deps-a"; + builder = ./with-deps.nix; + system = "x86_64-linux"; + dep = b; + }; + b = derivation { + name = "attic-test-with-deps-b"; + builder = ./with-deps.nix; + system = "x86_64-linux"; + dep = c; + }; + c = derivation { + name = "attic-test-with-deps-c-final"; + builder = ./with-deps.nix; + system = "x86_64-linux"; + }; +in a diff --git a/attic/src/nix_store/tests/mod.rs b/attic/src/nix_store/tests/mod.rs new file mode 100644 index 0000000..3d23922 --- /dev/null +++ b/attic/src/nix_store/tests/mod.rs @@ -0,0 +1,255 @@ +use super::*; + +use std::collections::HashSet; +use std::ffi::OsStr; +use std::os::unix::ffi::OsStrExt; +use std::process::Command; + +use serde::de::DeserializeOwned; +use tokio_test::block_on; + +pub mod test_nar; + +fn connect() -> NixStore { + NixStore::connect().expect("Failed to connect to the Nix store") +} + +/// Evaluates a Nix expression using the command-line interface. +fn cli_eval(expression: &str) -> T +where + T: DeserializeOwned, +{ + let cli = Command::new("nix-instantiate") + .args(["--eval", "--json", "-E", expression]) + .output() + .expect("Failed to evaluate"); + + if !cli.status.success() { + panic!("Evaluation of '{}' failed: {:?}", expression, cli.status); + } + + let json = std::str::from_utf8(&cli.stdout).expect("Result not valid UTF-8"); + + serde_json::from_str(json).expect("Failed to parse output") +} + +fn assert_base_name(store: &str, path: &str, expected: &str) { + let expected = PathBuf::from(expected); + + assert_eq!( + expected, + to_base_name(store.as_ref(), path.as_ref()).unwrap(), + ); +} + +fn assert_base_name_err(store: &str, path: &str, err: &str) { + let e = to_base_name(store.as_ref(), path.as_ref()).unwrap_err(); + + if let AtticError::InvalidStorePath { path: _, reason } = e { + assert!(reason.contains(err)); + } else { + panic!("to_base_name didn't return an InvalidStorePath"); + } +} + +#[test] +fn test_connect() { + connect(); +} + +#[test] +fn test_store_dir() { + let store = connect(); + let expected: PathBuf = cli_eval("builtins.storeDir"); + assert_eq!(store.store_dir(), expected); +} + +#[test] +fn test_to_base_name() { + assert_base_name( + "/nix/store", + "/nix/store/3iq73s1p4mh4mrflj2k1whkzsimxf0l7-firefox-91.0", + "3iq73s1p4mh4mrflj2k1whkzsimxf0l7-firefox-91.0", + ); + assert_base_name( + "/gnu/store", + "/gnu/store/3iq73s1p4mh4mrflj2k1whkzsimxf0l7-firefox-91.0/", + "3iq73s1p4mh4mrflj2k1whkzsimxf0l7-firefox-91.0", + ); + assert_base_name( + "/nix/store", + "/nix/store/3iq73s1p4mh4mrflj2k1whkzsimxf0l7-firefox-91.0/bin/firefox", + "3iq73s1p4mh4mrflj2k1whkzsimxf0l7-firefox-91.0", + ); + assert_base_name_err( + "/gnu/store", + "/nix/store/3iq73s1p4mh4mrflj2k1whkzsimxf0l7-firefox-91.0", + "Path is not in store directory", + ); + assert_base_name_err("/nix/store", "/nix/store", "Path is store directory itself"); + assert_base_name_err( + "/nix/store", + "/nix/store/", + "Path is store directory itself", + ); + assert_base_name_err("/nix/store", "/nix/store/tooshort", "Path is too short"); +} + +#[test] +fn test_base_name() { + let bn = PathBuf::from("ia70ss13m22znbl8khrf2hq72qmh5drr-ruby-2.7.5"); + StorePath::from_base_name(bn).unwrap(); + + // name has invalid UTF-8 + let osstr = OsStr::from_bytes(b"ia70ss13m22znbl8khrf2hq72qmh5drr-\xc3"); + let bn = PathBuf::from(osstr); + StorePath::from_base_name(bn).unwrap_err(); + + // hash has bad characters + let bn = PathBuf::from("eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee-ruby-2.7.5"); + 
+    StorePath::from_base_name(bn).unwrap_err();
+
+    // name has bad characters
+    let bn = PathBuf::from("ia70ss13m22znbl8khrf2hq72qmh5drr-shocking!!!");
+    StorePath::from_base_name(bn).unwrap_err();
+
+    // name portion empty
+    let bn = PathBuf::from("ia70ss13m22znbl8khrf2hq72qmh5drr-");
+    StorePath::from_base_name(bn).unwrap_err();
+
+    // no name portion
+    let bn = PathBuf::from("ia70ss13m22znbl8khrf2hq72qmh5drr");
+    StorePath::from_base_name(bn).unwrap_err();
+
+    // too short
+    let bn = PathBuf::from("ia70ss13m22znbl8khrf2hq");
+    StorePath::from_base_name(bn).unwrap_err();
+}
+
+#[test]
+fn test_store_path_hash() {
+    // valid base-32 hash
+    let h = "ia70ss13m22znbl8khrf2hq72qmh5drr".to_string();
+    StorePathHash::new(h).unwrap();
+
+    // invalid characters
+    let h = "eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee".to_string();
+    StorePathHash::new(h).unwrap_err();
+    let h = "IA70SS13M22ZNBL8KHRF2HQ72QMH5DRR".to_string();
+    StorePathHash::new(h).unwrap_err();
+
+    // too short
+    let h = "ia70ss13m22znbl8khrf2hq".to_string();
+    StorePathHash::new(h).unwrap_err();
+}
+
+#[test]
+fn test_nar_streaming() {
+    let store = NixStore::connect().expect("Failed to connect to the Nix store");
+
+    block_on(async move {
+        let test_nar = test_nar::NO_DEPS;
+        test_nar.import().await.expect("Could not import test NAR");
+
+        let target = test_nar.get_target().expect("Could not create dump target");
+        let writer = target.get_writer().await.expect("Could not get writer");
+
+        let store_path = store.parse_store_path(test_nar.path()).unwrap();
+
+        let stream = store.nar_from_path(store_path);
+        stream.write_all(writer).await.unwrap();
+
+        target
+            .validate()
+            .await
+            .expect("Could not validate resulting dump");
+    });
+}
+
+#[test]
+fn test_compute_fs_closure() {
+    let store = NixStore::connect().expect("Failed to connect to the Nix store");
+
+    block_on(async move {
+        use test_nar::{WITH_DEPS_A, WITH_DEPS_B, WITH_DEPS_C};
+
+        for nar in [WITH_DEPS_C, WITH_DEPS_B, WITH_DEPS_A] {
+            nar.import().await.expect("Could not import test NAR");
+
+            let path = store
+                .parse_store_path(nar.path())
+                .expect("Could not parse store path");
+
+            let actual: HashSet<StorePath> = store
+                .compute_fs_closure(path, false, false, false)
+                .await
+                .expect("Could not compute closure")
+                .into_iter()
+                .collect();
+
+            assert_eq!(nar.closure(), actual);
+        }
+    });
+}
+
+#[test]
+fn test_compute_fs_closure_multi() {
+    let store = NixStore::connect().expect("Failed to connect to the Nix store");
+
+    block_on(async move {
+        use test_nar::{NO_DEPS, WITH_DEPS_A, WITH_DEPS_B, WITH_DEPS_C};
+
+        for nar in [NO_DEPS, WITH_DEPS_C, WITH_DEPS_B, WITH_DEPS_A] {
+            nar.import().await.expect("Could not import test NAR");
+        }
+
+        let mut expected = NO_DEPS.closure();
+        expected.extend(WITH_DEPS_A.closure());
+
+        let paths = vec![
+            store.parse_store_path(WITH_DEPS_A.path()).unwrap(),
+            store.parse_store_path(NO_DEPS.path()).unwrap(),
+        ];
+
+        let actual: HashSet<StorePath> = store
+            .compute_fs_closure_multi(paths, false, false, false)
+            .await
+            .expect("Could not compute closure")
+            .into_iter()
+            .collect();
+
+        eprintln!("Closure: {:#?}", actual);
+
+        assert_eq!(expected, actual);
+    });
+}
+
+#[test]
+fn test_query_path_info() {
+    let store = NixStore::connect().expect("Failed to connect to the Nix store");
+
+    block_on(async move {
+        use test_nar::{WITH_DEPS_B, WITH_DEPS_C};
+
+        for nar in [WITH_DEPS_C, WITH_DEPS_B] {
+            nar.import().await.expect("Could not import test NAR");
+        }
+
+        let nar = WITH_DEPS_B;
+        let path = store.parse_store_path(nar.path()).unwrap();
+        let path_info = 
store + .query_path_info(path) + .await + .expect("Could not query path info"); + + eprintln!("Path info: {:?}", path_info); + + assert_eq!(nar.nar().len() as u64, path_info.nar_size); + assert_eq!( + vec![PathBuf::from( + "3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final" + ),], + path_info.references + ); + }); +} diff --git a/attic/src/nix_store/tests/nar/3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final b/attic/src/nix_store/tests/nar/3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final new file mode 100644 index 0000000..a51f6a4 --- /dev/null +++ b/attic/src/nix_store/tests/nar/3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final @@ -0,0 +1 @@ +Hi! I depend on . diff --git a/attic/src/nix_store/tests/nar/3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final.export b/attic/src/nix_store/tests/nar/3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final.export new file mode 100644 index 0000000000000000000000000000000000000000..d43556972229dad397e6960e2b2910b3c56a683b GIT binary patch literal 344 zcmaKm&uYRz5XK`EDnc)Pg7jt&YZ@h(7bqwc&t8YkZe}%^YC-pj2!h~TzWKfx zelw%wT5qD{&I*D?cQh&=vXSId(HBwfy}7in;)AIV4TNMYUU$TXIKGQtxsP<6R>m8u z(vH&E5|HVR1lLL$}dVyFU?6TV&H)Clk@XR kQu9iRg`j+oOhpAxg_P8S)Vvgh{5%CcE{Fuod`+l203e4Fga7~l literal 0 HcmV?d00001 diff --git a/attic/src/nix_store/tests/nar/544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b b/attic/src/nix_store/tests/nar/544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b new file mode 100644 index 0000000..99fafd7 --- /dev/null +++ b/attic/src/nix_store/tests/nar/544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b @@ -0,0 +1 @@ +Hi! I depend on /nix/store/3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final. diff --git a/attic/src/nix_store/tests/nar/544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b.export b/attic/src/nix_store/tests/nar/544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b.export new file mode 100644 index 0000000000000000000000000000000000000000..966e13e099a76bfa8ba9b5fd8c203c416c057f6b GIT binary patch literal 488 zcmbu4K~KXV7>2jRWX8n*(B1UFTG~$9tBGd2csAZBg+c{NDUQ`&ZaT_}6w4>S$AEEEWI@(7y zHB{G8r=xfh#e?fCjk8baM3EU>ETNs#tk1 literal 0 HcmV?d00001 diff --git a/attic/src/nix_store/tests/nar/544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b.nar b/attic/src/nix_store/tests/nar/544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b.nar new file mode 100644 index 0000000000000000000000000000000000000000..01815661e803462e36abab13753bbabfc064e5b1 GIT binary patch literal 208 zcmXwzK@NgI3`H?9(U^FLai@!lU<@2Vui$Pn6sL$~oT(zW7sJ@i@B53su9In)KA^p^ z^hU_WY8Lx#%y~>19RAitWRJI3t9`PJY&sTX3D?MH+Ng`F&>>fIi^ wc#tVwVNb3$&)V8Uqq*cE*F~0t79j(IL7*qGFkT5@peEMsel-dHf9|613vv%EmH+?% literal 0 HcmV?d00001 diff --git a/attic/src/nix_store/tests/nar/n7q4i7rlmbk4xz8qdsxpm6jbhrnxraq2-attic-test-with-deps-a b/attic/src/nix_store/tests/nar/n7q4i7rlmbk4xz8qdsxpm6jbhrnxraq2-attic-test-with-deps-a new file mode 100644 index 0000000..b10d7ef --- /dev/null +++ b/attic/src/nix_store/tests/nar/n7q4i7rlmbk4xz8qdsxpm6jbhrnxraq2-attic-test-with-deps-a @@ -0,0 +1 @@ +Hi! I depend on /nix/store/544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b. 
diff --git a/attic/src/nix_store/tests/nar/n7q4i7rlmbk4xz8qdsxpm6jbhrnxraq2-attic-test-with-deps-a.export b/attic/src/nix_store/tests/nar/n7q4i7rlmbk4xz8qdsxpm6jbhrnxraq2-attic-test-with-deps-a.export new file mode 100644 index 0000000000000000000000000000000000000000..7769030820c11838c7c71e45cde1d866d65706e8 GIT binary patch literal 480 zcmbV`KTpFj6vSHz353LV2pbG;+$6N=iUgty69cRNHL;U8j%`wtPfwz}N|h)JZ@G6j z{O*>E`R+iq42W1(%3h%Lm@OfH0KEeB-$qRDHFztEubSA!4S3dSFSK_b;NRqZw2!zj zLi4EAQ9KsK&bL;?Pg&M6Rvrshm>~^vO=(jn`5{+ogYnKF;=N?Z3+K^M`Vx&4hv@2d zLZ9|MLeKmD^JRLU!JqZ$ZI{Wdts7cpVaPk~f@wC2mR1K#y5w>XnL($X3*3M1pSiEU jcOI}}p~twkSwG&cplL&slvj$G%B8~}J6GKH06UW(suOj{ literal 0 HcmV?d00001 diff --git a/attic/src/nix_store/tests/nar/n7q4i7rlmbk4xz8qdsxpm6jbhrnxraq2-attic-test-with-deps-a.nar b/attic/src/nix_store/tests/nar/n7q4i7rlmbk4xz8qdsxpm6jbhrnxraq2-attic-test-with-deps-a.nar new file mode 100644 index 0000000000000000000000000000000000000000..290c31fe2c8349efd22567cf0c45e50035a138ab GIT binary patch literal 200 zcmXwz!485j5JWLC(U|xT^`?ilplI+9^zIKpS)l|=ZC63R-V)cn%)U9y%rg3&(F5d8 z)%t~y1Z%?nnsv%bZ;RtQV|G|Q+uTKSX4M%&L;lKq)+=!lC0aD4=!}q|NP6lV%0rqy vl&bqmRra$VbW#-ec9-3VX;BLtvyLN(`EEx)gdgrvCMj>6HYUTiyqc z_ij4*y+=aSXU45iIsnU~Hzj{b_(14?IvDAUw1xWAa66fk7A<-}@1971=;x$L6c3PK zn@H#YC7{sY(#070+gVZmv-)};>7H~JAIqF?Eo5l&@12vTJd|1(C5wJtlrO4@?ru6I$l_}Grc-*j5-dsww>VHaHlVoHe5Sz*#*ef BM`i#3 literal 0 HcmV?d00001 diff --git a/attic/src/nix_store/tests/nar/nm1w9sdm6j6icmhd2q3260hl1w9zj6li-attic-test-no-deps.nar b/attic/src/nix_store/tests/nar/nm1w9sdm6j6icmhd2q3260hl1w9zj6li-attic-test-no-deps.nar new file mode 100644 index 0000000000000000000000000000000000000000..9f0063e67fdcf0ed9cdd4ded0cb5dfc40755bb68 GIT binary patch literal 144 zcmd;OfPlQr3f;t_5|HVR1lLL$}dVyFU?6TV&H)Clk@XR uQu9iRWuSbIOhpAxg^a|qRE4~Jg_P8S)V!3`yyVQ(Vm&U95|}-jQ1t*KO%|yD literal 0 HcmV?d00001 diff --git a/attic/src/nix_store/tests/test_nar.rs b/attic/src/nix_store/tests/test_nar.rs new file mode 100644 index 0000000..ae0759a --- /dev/null +++ b/attic/src/nix_store/tests/test_nar.rs @@ -0,0 +1,245 @@ +//! Utilities for testing the NAR dump functionality. + +use std::collections::HashSet; +use std::io; +use std::path::{Path, PathBuf}; +use std::pin::Pin; +use std::process::Stdio; +use std::sync::Arc; +use std::task::{Context, Poll}; + +use tempfile::NamedTempFile; +use tokio::fs::{File, OpenOptions}; +use tokio::io::{AsyncReadExt, AsyncWrite, AsyncWriteExt}; +use tokio::process::Command; + +use crate::error::AtticResult; +use crate::nix_store::StorePath; + +/// Expected values for `nm1w9sdm6j6icmhd2q3260hl1w9zj6li-attic-test-no-deps`. +pub const NO_DEPS: TestNar = TestNar { + store_path: "/nix/store/nm1w9sdm6j6icmhd2q3260hl1w9zj6li-attic-test-no-deps", + original_file: include_bytes!("nar/nm1w9sdm6j6icmhd2q3260hl1w9zj6li-attic-test-no-deps"), + nar: include_bytes!("nar/nm1w9sdm6j6icmhd2q3260hl1w9zj6li-attic-test-no-deps.nar"), + export: include_bytes!("nar/nm1w9sdm6j6icmhd2q3260hl1w9zj6li-attic-test-no-deps.export"), + closure: &["nm1w9sdm6j6icmhd2q3260hl1w9zj6li-attic-test-no-deps"], +}; + +/// Expected values for `n7q4i7rlmbk4xz8qdsxpm6jbhrnxraq2-attic-test-with-deps-a`. +/// +/// This depends on `544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b` as well +/// as `3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final`. 
+pub const WITH_DEPS_A: TestNar = TestNar {
+    store_path: "/nix/store/n7q4i7rlmbk4xz8qdsxpm6jbhrnxraq2-attic-test-with-deps-a",
+    original_file: include_bytes!("nar/n7q4i7rlmbk4xz8qdsxpm6jbhrnxraq2-attic-test-with-deps-a"),
+    nar: include_bytes!("nar/n7q4i7rlmbk4xz8qdsxpm6jbhrnxraq2-attic-test-with-deps-a.nar"),
+    export: include_bytes!("nar/n7q4i7rlmbk4xz8qdsxpm6jbhrnxraq2-attic-test-with-deps-a.export"),
+    closure: &[
+        "n7q4i7rlmbk4xz8qdsxpm6jbhrnxraq2-attic-test-with-deps-a",
+        "544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b",
+        "3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final",
+    ],
+};
+
+/// Expected values for `544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b`.
+///
+/// This depends on `3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final`.
+pub const WITH_DEPS_B: TestNar = TestNar {
+    store_path: "/nix/store/544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b",
+    original_file: include_bytes!("nar/544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b"),
+    nar: include_bytes!("nar/544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b.nar"),
+    export: include_bytes!("nar/544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b.export"),
+    closure: &[
+        "544qcchwgcgpz3xi1bbml28f8jj6009p-attic-test-with-deps-b",
+        "3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final",
+    ],
+};
+
+/// Expected values for `3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final`.
+pub const WITH_DEPS_C: TestNar = TestNar {
+    store_path: "/nix/store/3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final",
+    original_file: include_bytes!(
+        "nar/3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final"
+    ),
+    nar: include_bytes!("nar/3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final.nar"),
+    export: include_bytes!(
+        "nar/3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final.export"
+    ),
+    closure: &["3k1wymic8p7h5pfcqfhh0jan8ny2a712-attic-test-with-deps-c-final"],
+};
+
+/// A test NAR.
+#[derive(Debug, Clone)]
+pub struct TestNar {
+    /// Full path in the Nix Store when imported.
+    store_path: &'static str,
+
+    /// The original file.
+    original_file: &'static [u8],
+
+    /// A NAR dump without path metadata.
+    nar: &'static [u8],
+
+    /// An importable NAR dump produced by `nix-store --export`.
+    export: &'static [u8],
+
+    /// The expected closure.
+    closure: &'static [&'static str],
+}
+
+/// A target that can receive and verify a NAR dump.
+pub struct NarDump {
+    /// The produced NAR dump.
+    actual: NamedTempFile,
+
+    /// The expected values.
+    expected: TestNar,
+}
+
+pub struct NarDumpWriter {
+    file: File,
+    _lifetime: Arc<NarDump>,
+}
+
+impl TestNar {
+    /// Attempts to import the NAR into the local Nix Store.
+    ///
+    /// This requires the current user to be trusted by the nix-daemon.
+    pub async fn import(&self) -> io::Result<()> {
+        let mut child = Command::new("nix-store")
+            .arg("--import")
+            .stdin(Stdio::piped())
+            .stdout(Stdio::piped())
+            .spawn()?;
+
+        let mut stdin = child.stdin.take().unwrap();
+        stdin.write_all(self.export).await?;
+        drop(stdin);
+
+        let output = child.wait_with_output().await?;
+        if !output.status.success() {
+            let e = format!("Nix exited with code {:?}", output.status.code());
+            return Err(io::Error::new(io::ErrorKind::Other, e));
+        }
+
+        // ensure that we imported the correct thing
+        let store_path = String::from_utf8_lossy(&output.stdout);
+        let store_path = store_path.trim_end();
+        if store_path != self.store_path {
+            let e = format!(
+                "Import resulted in \"{}\", but we want \"{}\"",
+                store_path, self.store_path
+            );
+            return Err(io::Error::new(io::ErrorKind::Other, e));
+        }
+
+        Ok(())
+    }
+
+    /// Returns the full store path that will be present when imported.
+    pub fn path(&self) -> &Path {
+        Path::new(self.store_path)
+    }
+
+    /// Returns the closure of the store path.
+    pub fn closure(&self) -> HashSet<StorePath> {
+        self.closure
+            .iter()
+            .map(|bp| {
+                let bp = PathBuf::from(bp);
+                StorePath::from_base_name(bp)
+            })
+            .collect::<AtticResult<HashSet<StorePath>>>()
+            .unwrap()
+    }
+
+    /// Returns the raw expected NAR.
+    pub fn nar(&self) -> &[u8] {
+        self.nar
+    }
+
+    /// Creates a new test target.
+    pub fn get_target(&self) -> io::Result<Arc<NarDump>> {
+        let target = NarDump::new(self.clone())?;
+        Ok(Arc::new(target))
+    }
+}
+
+impl NarDump {
+    /// Creates a new dump target.
+    fn new(expected: TestNar) -> io::Result<Self> {
+        Ok(Self {
+            actual: NamedTempFile::new()?,
+            expected,
+        })
+    }
+
+    /// Returns a handle to write to the buffer.
+    pub async fn get_writer(self: &Arc<Self>) -> io::Result<Box<NarDumpWriter>> {
+        let file = OpenOptions::new()
+            .read(false)
+            .write(true)
+            .open(self.actual.path())
+            .await?;
+
+        Ok(Box::new(NarDumpWriter {
+            file,
+            _lifetime: self.clone(),
+        }))
+    }
+
+    /// Validates the resulting dump against expected values.
+    pub async fn validate(&self) -> io::Result<()> {
+        let mut file = File::open(self.actual.path()).await?;
+
+        let metadata = file.metadata().await?;
+        if metadata.len() != self.expected.nar.len() as u64 {
+            let e = format!(
+                "Length mismatch - Got {}, should be {}",
+                metadata.len(),
+                self.expected.nar.len()
+            );
+            return Err(io::Error::new(io::ErrorKind::InvalidData, e));
+        }
+
+        let mut bytes = Vec::new();
+        file.read_to_end(&mut bytes).await?;
+        if bytes != self.expected.nar {
+            assert_eq!(bytes.len(), self.expected.nar.len());
+
+            for i in 0..bytes.len() {
+                if bytes[i] != self.expected.nar[i] {
+                    eprintln!(
+                        "Byte {} mismatch - We got {}, should be {}",
+                        i, bytes[i], self.expected.nar[i]
+                    );
+                }
+            }
+
+            return Err(io::Error::new(
+                io::ErrorKind::InvalidData,
+                "Content mismatch",
+            ));
+        }
+
+        Ok(())
+    }
+}
+
+impl AsyncWrite for NarDumpWriter {
+    fn poll_write(
+        mut self: Pin<&mut Self>,
+        cx: &mut Context<'_>,
+        buf: &[u8],
+    ) -> Poll<io::Result<usize>> {
+        Pin::new(&mut self.file).poll_write(cx, buf)
+    }
+
+    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+        Pin::new(&mut self.file).poll_flush(cx)
+    }
+
+    fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+        Pin::new(&mut self.file).poll_shutdown(cx)
+    }
+}
diff --git a/attic/src/signing/mod.rs b/attic/src/signing/mod.rs
new file mode 100644
index 0000000..21aa38d
--- /dev/null
+++ b/attic/src/signing/mod.rs
@@ -0,0 +1,271 @@
+//! Object Signing and Verification.
+//!
+//! Nix utilizes Ed25519 to generate signatures on NAR hashes. Currently
+//! we can either generate signatures on the fly per request, or cache them
+//! in the data store.
+//!
+//! ## String format
+//!
+//! All signing-related strings in Nix follow the same format (henceforth
+//! "the canonical format"):
+//!
+//! ```text
+//! {keyName}:{base64Payload}
+//! ```
+//!
+//! We follow the same format, so keys generated using the Nix CLI will
+//! simply work.
+//!
+//! ## Serde
+//!
+//! `Serialize` and `Deserialize` are implemented to convert the structs
+//! from and to the canonical format.
+
+use std::convert::TryInto;
+
+use serde::{de, ser, Deserialize, Serialize};
+
+use base64::DecodeError;
+use displaydoc::Display;
+use ed25519_compact::{Error as SignatureError, KeyPair, PublicKey, Signature};
+
+use crate::error::AtticResult;
+
+#[cfg(test)]
+mod tests;
+
+/// An ed25519 keypair for signing.
+#[derive(Debug)]
+pub struct NixKeypair {
+    /// Name of this key.
+    name: String,
+
+    /// The keypair.
+    keypair: KeyPair,
+}
+
+/// An ed25519 public key for verification.
+#[derive(Debug, Clone)]
+pub struct NixPublicKey {
+    /// Name of this key.
+    name: String,
+
+    /// The public key.
+    public: PublicKey,
+}
+
+/// A signing error.
+#[derive(Debug, Display)]
+#[ignore_extra_doc_attributes]
+pub enum Error {
+    /// Signature error: {0}
+    SignatureError(SignatureError),
+
+    /// The string has a wrong key name attached to it: Our name is "{our_name}" and the string has "{string_name}"
+    WrongKeyName {
+        our_name: String,
+        string_name: String,
+    },
+
+    /// The string lacks a colon separator.
+    NoColonSeparator,
+
+    /// The name portion of the string is blank.
+    BlankKeyName,
+
+    /// The payload portion of the string is blank.
+    BlankPayload,
+
+    /// Base64 decode error: {0}
+    Base64DecodeError(DecodeError),
+
+    /// Invalid base64 payload length: Expected {expected} ({usage}), got {actual}
+    InvalidPayloadLength {
+        expected: usize,
+        actual: usize,
+        usage: &'static str,
+    },
+
+    /// Invalid signing key name "{0}".
+    ///
+    /// A valid name cannot be empty and must not contain colons (:).
+    InvalidSigningKeyName(String),
+}
+
+impl NixKeypair {
+    /// Generates a new keypair.
+    pub fn generate(name: &str) -> AtticResult<Self> {
+        // TODO: Make this configurable?
+        let keypair = KeyPair::generate();
+
+        validate_name(name)?;
+
+        Ok(Self {
+            name: name.to_string(),
+            keypair,
+        })
+    }
+
+    /// Imports an existing keypair from its canonical representation.
+    pub fn from_str(keypair: &str) -> AtticResult<Self> {
+        let (name, bytes) = decode_string(keypair, "keypair", KeyPair::BYTES, None)?;
+
+        let keypair = KeyPair::from_slice(&bytes).map_err(Error::SignatureError)?;
+
+        Ok(Self {
+            name: name.to_string(),
+            keypair,
+        })
+    }
+
+    /// Returns the canonical representation of the keypair.
+    ///
+    /// This results in a 64-byte base64 payload that contains both the private
+    /// key and the public key, in that order.
+    ///
+    /// For example, it can look like:
+    /// attic-test:msdoldbtlongtt0/xkzmcbqihd7yvy8iomajqhnkutsl3b1pyyyc0mgg2rs0ttzzuyuk9rb2zphvtpes71mlha==
+    pub fn export_keypair(&self) -> String {
+        format!("{}:{}", self.name, base64::encode(*self.keypair))
+    }
+
+    /// Returns the canonical representation of the public key.
+    ///
+    /// For example, it can look like:
+    /// attic-test:C929acssgtJoINkUtLbc81GFJPUW9maR77TxEu9ZpRw=
+    pub fn export_public_key(&self) -> String {
+        format!("{}:{}", self.name, base64::encode(*self.keypair.pk))
+    }
+
+    /// Returns the public key portion of the keypair.
+    pub fn to_public_key(&self) -> NixPublicKey {
+        NixPublicKey {
+            name: self.name.clone(),
+            public: self.keypair.pk,
+        }
+    }
+
+    /// Signs a message, returning its canonical representation.
+    pub fn sign(&self, message: &[u8]) -> String {
+        let bytes = self.keypair.sk.sign(message, None);
+        format!("{}:{}", self.name, base64::encode(bytes))
+    }
+
+    /// Verifies a message.
+    pub fn verify(&self, message: &[u8], signature: &str) -> AtticResult<()> {
+        let (_, bytes) = decode_string(signature, "signature", Signature::BYTES, Some(&self.name))?;
+
+        let bytes: [u8; Signature::BYTES] = bytes.try_into().unwrap();
+        let signature = Signature::from_slice(&bytes).map_err(Error::SignatureError)?;
+
+        self.keypair
+            .pk
+            .verify(message, &signature)
+            .map_err(|e| Error::SignatureError(e).into())
+    }
+}
+
+impl<'de> Deserialize<'de> for NixKeypair {
+    /// Deserializes a potentially-invalid Nix keypair from its canonical representation.
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: de::Deserializer<'de>,
+    {
+        use de::Error;
+        String::deserialize(deserializer)
+            .and_then(|s| Self::from_str(&s).map_err(|e| Error::custom(e.to_string())))
+    }
+}
+
+impl Serialize for NixKeypair {
+    /// Serializes a Nix keypair to its canonical representation.
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: ser::Serializer,
+    {
+        serializer.serialize_str(&self.export_keypair())
+    }
+}
+
+impl NixPublicKey {
+    /// Imports an existing public key from its canonical representation.
+    pub fn from_str(public_key: &str) -> AtticResult<Self> {
+        let (name, bytes) = decode_string(public_key, "public key", PublicKey::BYTES, None)?;
+
+        let public = PublicKey::from_slice(&bytes).map_err(Error::SignatureError)?;
+
+        Ok(Self {
+            name: name.to_string(),
+            public,
+        })
+    }
+
+    /// Returns the Nix-compatible textual representation of the public key.
+    ///
+    /// For example, it can look like:
+    /// attic-test:C929acssgtJoINkUtLbc81GFJPUW9maR77TxEu9ZpRw=
+    pub fn export(&self) -> String {
+        format!("{}:{}", self.name, base64::encode(*self.public))
+    }
+
+    /// Verifies a message.
+    pub fn verify(&self, message: &[u8], signature: &str) -> AtticResult<()> {
+        let (_, bytes) = decode_string(signature, "signature", Signature::BYTES, Some(&self.name))?;
+
+        let bytes: [u8; Signature::BYTES] = bytes.try_into().unwrap();
+        let signature = Signature::from_slice(&bytes).map_err(Error::SignatureError)?;
+
+        self.public
+            .verify(message, &signature)
+            .map_err(|e| Error::SignatureError(e).into())
+    }
+}
+
+/// Validates the name/label of a signing key.
+///
+/// A valid name cannot be empty and must not contain colons (:).
+fn validate_name(name: &str) -> AtticResult<()> {
+    if name.is_empty() || name.find(':').is_some() {
+        Err(Error::InvalidSigningKeyName(name.to_string()).into())
+    } else {
+        Ok(())
+    }
+}
+
+/// Decodes a colon-delimited string containing a key name and a base64 payload.
+fn decode_string<'s>(
+    s: &'s str,
+    usage: &'static str,
+    expected_payload_length: usize,
+    expected_name: Option<&str>,
+) -> AtticResult<(&'s str, Vec<u8>)> {
+    let colon = s.find(':').ok_or(Error::NoColonSeparator)?;
+
+    let (name, colon_and_payload) = s.split_at(colon);
+
+    validate_name(name)?;
+
+    // don't bother decoding base64 if the name doesn't match
+    if let Some(expected_name) = expected_name {
+        if expected_name != name {
+            return Err(Error::WrongKeyName {
+                our_name: expected_name.to_string(),
+                string_name: name.to_string(),
+            }
+            .into());
+        }
+    }
+
+    let bytes = base64::decode(&colon_and_payload[1..]).map_err(Error::Base64DecodeError)?;
+
+    if bytes.len() != expected_payload_length {
+        return Err(Error::InvalidPayloadLength {
+            actual: bytes.len(),
+            expected: expected_payload_length,
+            usage,
+        }
+        .into());
+    }
+
+    Ok((name, bytes))
+}
diff --git a/attic/src/signing/tests.rs b/attic/src/signing/tests.rs
new file mode 100644
index 0000000..a3de4ae
--- /dev/null
+++ b/attic/src/signing/tests.rs
@@ -0,0 +1,68 @@
+use super::*;
+
+#[test]
+fn test_generate_key() {
+    let keypair = NixKeypair::generate("attic-test").expect("Could not generate key");
+
+    let export_priv = keypair.export_keypair();
+    let export_pub = keypair.export_public_key();
+
+    eprintln!("Private key: {}", export_priv);
+    eprintln!(" Public key: {}", export_pub);
+
+    // re-import keypair
+    let import = NixKeypair::from_str(&export_priv).expect("Could not re-import generated key");
+
+    assert_eq!(keypair.name, import.name);
+    assert_eq!(keypair.keypair, import.keypair);
+
+    // re-import public key
+    let import_pub = NixPublicKey::from_str(&export_pub).expect("Could not re-import public key");
+
+    assert_eq!(keypair.name, import_pub.name);
+    assert_eq!(keypair.keypair.pk, import_pub.public);
+
+    // test the export functionality of NixPublicKey as well
+    let export_pub2 = import_pub.export();
+    let import_pub2 = NixPublicKey::from_str(&export_pub2).expect("Could not re-import public key");
+
+    assert_eq!(keypair.name, import_pub2.name);
+    assert_eq!(keypair.keypair.pk, import_pub2.public);
+}
+
+#[test]
+fn test_serde() {
+    let json = "\"attic-test:x326WFy/JUl+MQnN1u9NPdWQPBbcVn2mwoIqSLS3DmQqZ8qT8rBSxxEnyhtl3jDouBqodlyfq6F+HsVhbTYPMA==\"";
+
+    let keypair: NixKeypair = serde_json::from_str(json).expect("Could not deserialize keypair");
+
+    let export = serde_json::to_string(&keypair).expect("Could not serialize keypair");
+
+    eprintln!("Public Key: {}", keypair.export_public_key());
+
+    assert_eq!(json, &export);
+}
+
+#[test]
+fn test_import_public_key() {
+    let cache_nixos_org = "cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=";
+    let import = NixPublicKey::from_str(cache_nixos_org).expect("Could not import public key");
+
+    assert_eq!(cache_nixos_org, import.export());
+}
+
+#[test]
+fn test_signing() {
+    let keypair = NixKeypair::generate("attic-test").expect("Could not generate key");
+
+    let public = keypair.to_public_key();
+
+    let message = b"hello world";
+
+    let signature = keypair.sign(message);
+
+    keypair.verify(message, &signature).unwrap();
+    public.verify(message, &signature).unwrap();
+
+    keypair.verify(message, "attic-test:lo9EfNIL4eGRuNh7DTbAAffWPpI2SlYC/8uP7JnhgmfRIUNGhSbFe8qEaKN0mFS02TuhPpXFPNtRkFcCp0hGAQ==").unwrap_err();
+}
diff --git a/attic/src/stream.rs b/attic/src/stream.rs
new file mode 100644
index 0000000..f37bb62
--- /dev/null
+++ b/attic/src/stream.rs
@@ -0,0 +1,110 @@
+//! Stream utilities.
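+//!
+//! A sketch of the intended usage of `StreamHasher`, mirroring
+//! `test_stream_hasher` at the bottom of this module (names here are
+//! illustrative):
+//!
+//! ```ignore
+//! let (mut reader, finalized) = StreamHasher::new(inner_reader, sha2::Sha256::new());
+//! // ... read `reader` to EOF ...
+//! let (hash, bytes_read) = finalized.get().expect("only finalized at EOF");
+//! ```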
+
+use std::marker::Unpin;
+use std::pin::Pin;
+use std::sync::Arc;
+use std::task::{Context, Poll};
+
+use digest::{Digest, Output as DigestOutput};
+use tokio::io::{AsyncRead, ReadBuf};
+use tokio::sync::OnceCell;
+
+/// Stream filter that hashes the bytes that have been read.
+///
+/// The hash is finalized when EOF is reached.
+pub struct StreamHasher<R: AsyncRead + Unpin, D: Digest + Unpin> {
+    inner: R,
+    digest: Option<D>,
+    bytes_read: usize,
+    finalized: Arc<OnceCell<(DigestOutput<D>, usize)>>,
+}
+
+impl<R: AsyncRead + Unpin, D: Digest + Unpin> StreamHasher<R, D> {
+    pub fn new(inner: R, digest: D) -> (Self, Arc<OnceCell<(DigestOutput<D>, usize)>>) {
+        let finalized = Arc::new(OnceCell::new());
+
+        (
+            Self {
+                inner,
+                digest: Some(digest),
+                bytes_read: 0,
+                finalized: finalized.clone(),
+            },
+            finalized,
+        )
+    }
+}
+
+impl<R: AsyncRead + Unpin, D: Digest + Unpin> AsyncRead for StreamHasher<R, D> {
+    fn poll_read(
+        mut self: Pin<&mut Self>,
+        cx: &mut Context<'_>,
+        buf: &mut ReadBuf<'_>,
+    ) -> Poll<std::io::Result<()>> {
+        let old_filled = buf.filled().len();
+        let r = Pin::new(&mut self.inner).poll_read(cx, buf);
+        let read_len = buf.filled().len() - old_filled;
+
+        match r {
+            Poll::Ready(Ok(())) => {
+                if read_len == 0 {
+                    // EOF
+                    if let Some(digest) = self.digest.take() {
+                        self.finalized
+                            .set((digest.finalize(), self.bytes_read))
+                            .expect("Hash has already been finalized");
+                    }
+                } else {
+                    // Read something
+                    let digest = self.digest.as_mut().expect("Stream has data after EOF");
+
+                    let filled = buf.filled();
+                    digest.update(&filled[filled.len() - read_len..]);
+                    self.bytes_read += read_len;
+                }
+            }
+            Poll::Ready(Err(_)) => {
+                assert!(read_len == 0);
+            }
+            Poll::Pending => {}
+        }
+
+        r
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    use tokio::io::AsyncReadExt;
+    use tokio_test::block_on;
+
+    #[test]
+    fn test_stream_hasher() {
+        let expected = b"hello world";
+        let expected_sha256 =
+            hex::decode("b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9")
+                .unwrap();
+
+        let (mut read, finalized) = StreamHasher::new(expected.as_slice(), sha2::Sha256::new());
+        assert!(finalized.get().is_none());
+
+        // force multiple reads
+        let mut buf = vec![0u8; 100];
+        let mut bytes_read = 0;
+        bytes_read += block_on(read.read(&mut buf[bytes_read..bytes_read + 5])).unwrap();
+        bytes_read += block_on(read.read(&mut buf[bytes_read..bytes_read + 5])).unwrap();
+        bytes_read += block_on(read.read(&mut buf[bytes_read..bytes_read + 5])).unwrap();
+        bytes_read += block_on(read.read(&mut buf[bytes_read..bytes_read + 5])).unwrap();
+
+        assert_eq!(expected.len(), bytes_read);
+        assert_eq!(expected, &buf[..bytes_read]);
+
+        let (hash, count) = finalized.get().expect("Hash wasn't finalized");
+
+        assert_eq!(expected_sha256.as_slice(), hash.as_slice());
+        assert_eq!(expected.len(), *count);
+        eprintln!("finalized = {:x?}", finalized);
+    }
+}
diff --git a/attic/src/testing/mod.rs b/attic/src/testing/mod.rs
new file mode 100644
index 0000000..191925e
--- /dev/null
+++ b/attic/src/testing/mod.rs
@@ -0,0 +1,3 @@
+//! Utilities for testing.
+
+pub mod shadow_store;
diff --git a/attic/src/testing/shadow_store/mod.rs b/attic/src/testing/shadow_store/mod.rs
new file mode 100644
index 0000000..2ef6da7
--- /dev/null
+++ b/attic/src/testing/shadow_store/mod.rs
@@ -0,0 +1,117 @@
+//! Shadow Nix store.
+//!
+//! Since Nix 2.0, Nix can use an alternative root for the store via
+//! `--store` while keeping the same `storeDir`. To test pulling from
+//! an Attic server with vanilla Nix, we create a temporary root
+//! for the store, as well as `nix.conf` and `netrc` configurations
+//! required to connect to an Attic server.
+//!
+//! ## Manual example
+//!
+//! ```bash
+//! 
NIX_CONF_DIR="$SHADOW/etc/nix" NIX_USER_CONF_FILES="" NIX_REMOTE="" \ +//! nix-store --store "$SHADOW" -r /nix/store/h8fxhm945jlsfxlr4rvkkqlws771l07c-nix-2.7pre20220127_558c4ee -v +//! ``` +//! +//! `nix.conf`: +//! +//! ```text +//! substituters = http://localhost:8080/attic-test +//! trusted-public-keys = attic-test:KmfKk/KwUscRJ8obZd4w6LgaqHZcn6uhfh7FYW02DzA= +//! ``` +//! +//! `netrc`: +//! +//! ```text +//! machine localhost password eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjQwNzA5MDg4MDAsImh0dHBzOi8vemhhb2ZlbmdsaS5naXRodWIuaW8vYXR0aWMiOnsieC1hdHRpYy1hY2Nlc3MiOnsiY2FjaGVzIjp7IioiOnsicHVzaCI6dHJ1ZSwicHVsbCI6dHJ1ZX19fX19.58WIuL8H_fQGEPmUG7U61FUHtAmsHXanYtQFSgqni6U +//! ``` + +use std::ffi::OsString; +use std::fs::{self, Permissions}; +use std::os::unix::fs::PermissionsExt; +use std::path::Path; + +use tempfile::{Builder as TempfileBuilder, TempDir}; + +const WRAPPER_TEMPLATE: &str = include_str!("nix-wrapper.sh"); + +/// A shadow Nix store. +/// +/// After creation, wrappers of common Nix executables will be +/// available under `bin`, allowing you to easily interact with +/// the shadow store. +pub struct ShadowStore { + store_root: TempDir, +} + +impl ShadowStore { + pub fn new() -> Self { + let store_root = TempfileBuilder::new() + .prefix("shadow-store-") + .tempdir() + .expect("failed to create temporary root"); + + fs::create_dir_all(store_root.path().join("etc/nix")) + .expect("failed to create temporary config dir"); + + fs::create_dir_all(store_root.path().join("bin")) + .expect("failed to create temporary wrapper dir"); + + let store = Self { store_root }; + store.create_wrapper("nix-store"); + + store + } + + /// Returns the path to the store root. + pub fn path(&self) -> &Path { + self.store_root.path() + } + + /// Returns the path to the `nix-store` wrapper. + pub fn nix_store_cmd(&self) -> OsString { + self.store_root + .path() + .join("bin/nix-store") + .as_os_str() + .to_owned() + } + + /// Creates a wrapper script for a Nix command. 
+    fn create_wrapper(&self, command: &str) {
+        let path = self.store_root.path().join("bin").join(command);
+        let permissions = Permissions::from_mode(0o755);
+        let wrapper = WRAPPER_TEMPLATE
+            .replace("%command%", command)
+            .replace("%store_root%", &self.store_root.path().to_string_lossy());
+
+        fs::write(&path, wrapper).expect("failed to write wrapper script");
+
+        fs::set_permissions(&path, permissions).expect("failed to set wrapper permissions");
+    }
+}
+
+impl Drop for ShadowStore {
+    fn drop(&mut self) {
+        // recursively set write permissions on directories so we can
+        // cleanly delete the entire store
+
+        fn walk(dir: &Path) {
+            // excuse the unwraps
+            let metadata = fs::metadata(dir).unwrap();
+            let mut permissions = metadata.permissions();
+            permissions.set_mode(permissions.mode() | 0o200);
+            fs::set_permissions(dir, permissions).unwrap();
+
+            for entry in fs::read_dir(dir).unwrap() {
+                let entry = entry.unwrap();
+
+                if entry.file_type().unwrap().is_dir() {
+                    walk(&entry.path());
+                }
+            }
+        }
+
+        walk(self.store_root.path());
+    }
+}
diff --git a/attic/src/testing/shadow_store/nix-wrapper.sh b/attic/src/testing/shadow_store/nix-wrapper.sh
new file mode 100644
index 0000000..7307ac8
--- /dev/null
+++ b/attic/src/testing/shadow_store/nix-wrapper.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+export NIX_CONF_DIR="%store_root%/etc/nix"
+export NIX_USER_CONF_FILES=""
+export NIX_REMOTE=""
+
+exec %command% --store "%store_root%" "$@"
diff --git a/attic/src/util.rs b/attic/src/util.rs
new file mode 100644
index 0000000..e95d7d9
--- /dev/null
+++ b/attic/src/util.rs
@@ -0,0 +1,39 @@
+//! Misc utilities.
+
+use std::future::Future;
+use std::mem;
+
+use tokio::task;
+
+/// Runs a future when dropped.
+///
+/// This is used to clean up external resources that are
+/// difficult to correctly model using ownership.
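+///
+/// A sketch of intended usage (illustrative only; `cleanup()` stands in
+/// for any async cleanup routine and is not part of this crate):
+///
+/// ```ignore
+/// let finally = Finally::new(cleanup());
+/// do_fallible_work()?; // on early return, `cleanup()` is spawned on drop
+/// finally.cancel(); // on success, skip the cleanup
+/// ```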
+pub struct Finally<F: Future + Send + 'static>
+where
+    F::Output: Send + 'static,
+{
+    f: Option<F>,
+}
+
+impl<F: Future + Send + 'static> Finally<F>
+where
+    F::Output: Send + 'static,
+{
+    pub fn new(f: F) -> Self {
+        Self { f: Some(f) }
+    }
+
+    pub fn cancel(self) {
+        mem::forget(self);
+    }
+}
+
+impl<F: Future + Send + 'static> Drop for Finally<F>
+where
+    F::Output: Send + 'static,
+{
+    fn drop(&mut self) {
+        task::spawn(self.f.take().unwrap());
+    }
+}
diff --git a/book/.gitignore b/book/.gitignore
new file mode 100644
index 0000000..7585238
--- /dev/null
+++ b/book/.gitignore
@@ -0,0 +1 @@
+book
diff --git a/book/book.toml b/book/book.toml
new file mode 100644
index 0000000..789e476
--- /dev/null
+++ b/book/book.toml
@@ -0,0 +1,10 @@
+[book]
+authors = ["Zhaofeng Li"]
+language = "en"
+multilingual = false
+src = "src"
+title = "Attic"
+
+[output.html]
+git-repository-url = "https://github.com/zhaofengli/attic"
+edit-url-template = "https://github.com/zhaofengli/attic/edit/main/book/{path}"
diff --git a/book/colorized-help.nix b/book/colorized-help.nix
new file mode 100644
index 0000000..9e7c54f
--- /dev/null
+++ b/book/colorized-help.nix
@@ -0,0 +1,43 @@
+{ lib, stdenv, runCommand, attic, ansi2html }:
+
+with builtins;
+
+let
+  commands = {
+    attic = [
+      null
+      "login"
+      "use"
+      "push"
+      "cache"
+      "cache create"
+      "cache configure"
+      "cache destroy"
+      "cache info"
+    ];
+    atticd = [
+      null
+    ];
+    atticadm = [
+      null
+      "make-token"
+    ];
+  };
+  renderMarkdown = name: subcommands: ''
+    mkdir -p $out
+    (
+      ansi2html -H
+      ${lib.concatMapStrings (subcommand: let
+        fullCommand = "${name} ${if subcommand == null then "" else subcommand}";
+      in "${renderCommand fullCommand}\n") subcommands}
+    ) >>$out/${name}.md
+  '';
+  renderCommand = fullCommand: ''
+    echo '## `${fullCommand}`'
+    echo -n '<pre><div class="hljs">'
+    TERM=xterm-256color CLICOLOR_FORCE=1 ${fullCommand} --help | ansi2html -p
+    echo '</div></pre>'
+  '';
+in runCommand "attic-colorized-help" {
+  nativeBuildInputs = [ attic ansi2html ];
+} (concatStringsSep "\n" (lib.mapAttrsToList renderMarkdown commands))
diff --git a/book/default.nix b/book/default.nix
new file mode 100644
index 0000000..8c31811
--- /dev/null
+++ b/book/default.nix
@@ -0,0 +1,40 @@
+{ lib, stdenv, nix-gitignore, mdbook, mdbook-linkcheck, python3, callPackage, writeScript
+, attic ? null
+}:
+
+let
+  colorizedHelp = let
+    help = callPackage ./colorized-help.nix {
+      inherit attic;
+    };
+  in if attic != null then help else null;
+in stdenv.mkDerivation {
+  inherit colorizedHelp;
+
+  name = "attic-book";
+
+  src = nix-gitignore.gitignoreSource [] ./.;
+
+  nativeBuildInputs = [ mdbook ];
+
+  buildPhase = ''
+    emitColorizedHelp() {
+      command=$1
+
+      if [[ -n "$colorizedHelp" ]]; then
+        cat "$colorizedHelp/$command.md" >> src/reference/$command-cli.md
+      else
+        echo "Error: No attic executable passed to the builder" >> src/reference/$command-cli.md
+      fi
+    }
+
+    emitColorizedHelp attic
+    emitColorizedHelp atticd
+    emitColorizedHelp atticadm
+
+    mdbook build -d ./build
+    cp -r ./build $out
+  '';
+
+  installPhase = "true";
+}
diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md
new file mode 100644
index 0000000..10cc8d9
--- /dev/null
+++ b/book/src/SUMMARY.md
@@ -0,0 +1,11 @@
+# Summary
+
+- [Introduction](./introduction.md)
+- [Tutorial](./tutorial.md)
+- [User Guide](./user-guide/README.md)
+- [Admin Guide](./admin-guide/README.md)
+- [FAQs](./faqs.md)
+- [Reference](./reference/README.md)
+  - [attic](./reference/attic-cli.md)
+  - [atticd](./reference/atticd-cli.md)
+  - [atticadm](./reference/atticadm-cli.md)
diff --git a/book/src/admin-guide/README.md b/book/src/admin-guide/README.md
new file mode 100644
index 0000000..641c6ee
--- /dev/null
+++ b/book/src/admin-guide/README.md
@@ -0,0 +1,3 @@
+# Admin Guide
+
+> This section is under construction.
diff --git a/book/src/faqs.md b/book/src/faqs.md
new file mode 100644
index 0000000..ae07ef4
--- /dev/null
+++ b/book/src/faqs.md
@@ -0,0 +1,37 @@
+# FAQs
+
+## Does it replace [Cachix](https://www.cachix.org)?
+
+No, it does not.
+Cachix is an awesome product and the direct inspiration for the user experience of Attic.
+It works at a much larger scale than Attic and is a proven solution.
+Numerous open-source projects in the Nix community (including mine!) use Cachix to share publicly-available binaries.
+
+Attic can be thought of as providing a similar user experience at a much smaller scale (personal or team use).
+
+## What happens if a user uploads a path that is already in the global cache?
+
+The user will still fully upload the path to the server because they have to prove possession of the file.
+The difference is that instead of having the upload streamed to the storage backend (e.g., S3), it's only run through a hash function and discarded.
+Once the NAR hash is confirmed, a mapping is created to grant the local cache access to the global NAR.
+The global deduplication behavior is transparent to the client.
+
+In the future, schemes to prove data possession without fully uploading the file may be supported.
+
+## What happens if a user uploads a path with incorrect/malicious metadata?
+
+They will only pollute their own cache.
+Path metadata (store path, references, deriver, etc.) is associated with the local cache, and the global cache only contains content-addressed NARs that are "context-free."
+
+## How is authentication handled?
+
+Authentication is done via signed JWTs containing the allowed permissions.
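+
+For illustration, decoding the payload of one of the tokens generated in the [tutorial](./tutorial.md) yields claims along these lines (the fields shown follow that example; this is not an exhaustive schema):
+
+```json
+{
+  "sub": "alice",
+  "exp": 1680293339,
+  "https://jwt.attic.rs/v1": {
+    "caches": {
+      "hello": { "r": 1, "w": 1 }
+    }
+  }
+}
+```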
+Each instance of `atticd --mode api-server` is stateless.
+This design may be revisited later, with the option of a more stateful method of authentication.
+
+## On what granularity is deduplication done?
+
+Currently, global deduplication is done on the level of NAR files.
+File or chunk-level deduplication (e.g., casync) may be added later.
+It remains to be seen how NAR reassembly can be done in a user-friendly yet economical manner:
+on compute services, outbound traffic often isn't free, while several S3-compatible storage services provide free egress (e.g., [Cloudflare R2](https://developers.cloudflare.com/r2/platform/pricing/)).
diff --git a/book/src/introduction.md b/book/src/introduction.md
new file mode 100644
index 0000000..d6f12c3
--- /dev/null
+++ b/book/src/introduction.md
@@ -0,0 +1,23 @@
+# Introduction
+
+**Attic** is a self-hostable Nix Binary Cache server backed by an S3-compatible storage provider.
+It has support for global deduplication and garbage collection.
+
+Attic is still an early prototype and is looking for more testers. Want to jump in? [Start your own Attic server](./tutorial.md) in 15 minutes.
+
+```
+⚙️ Pushing 5 paths to "demo" on "local" (566 already cached, 2001 in upstream)...
+✅ gnvi1x7r8kl3clzx0d266wi82fgyzidv-steam-run-fhs (29.69 MiB/s)
+✅ rw7bx7ak2p02ljm3z4hhpkjlr8rzg6xz-steam-fhs (30.56 MiB/s)
+✅ y92f9y7qhkpcvrqhzvf6k40j6iaxddq8-0p36ammvgyr55q9w75845kw4fw1c65ln-source (19.96 MiB/s)
+🕒 vscode-1.74.2 ███████████████████████████████████████ 345.66 MiB (41.32 MiB/s)
+🕓 zoom-5.12.9.367 ███████████████████████████ 329.36 MiB (39.47 MiB/s)
+```
+
+## Goals
+
+- **Multi-Tenancy**: Create a private cache for yourself, and one for friends and co-workers. Tenants are mutually untrusting and cannot pollute the views of other caches.
+- **Global Deduplication**: Individual caches (tenants) are simply restricted views of the content-addressed global cache. When paths are uploaded, a mapping is created to grant the local cache access to the global NAR.
+- **Managed Signing**: Signing is done on-the-fly by the server when store paths are fetched. The user pushing store paths does not have access to the signing key.
+- **High Availability**: Attic can be easily replicated. It's designed to be deployed to serverless platforms like fly.io but also works nicely in a single-machine setup.
+- **Garbage Collection**: Unused store paths can be garbage-collected in an LRU manner.
diff --git a/book/src/reference/README.md b/book/src/reference/README.md new file mode 100644 index 0000000..fe0da2b --- /dev/null +++ b/book/src/reference/README.md @@ -0,0 +1,7 @@ +# Reference + +This section contains detailed listings of options and parameters accepted by Attic: + +- [`attic` CLI](./attic-cli.md) +- [`atticd` CLI](./atticd-cli.md) +- [`atticadm` CLI](./atticadm-cli.md) diff --git a/book/src/reference/attic-cli.md b/book/src/reference/attic-cli.md new file mode 100644 index 0000000..96facb1 --- /dev/null +++ b/book/src/reference/attic-cli.md @@ -0,0 +1,12 @@ +# `attic` CLI + +The following are the help messages that will be printed when you invoke any sub-command with `--help`: + + diff --git a/book/src/reference/atticadm-cli.md b/book/src/reference/atticadm-cli.md new file mode 100644 index 0000000..415ce9f --- /dev/null +++ b/book/src/reference/atticadm-cli.md @@ -0,0 +1,11 @@ +# `atticadm` CLI + +The following are the help messages that will be printed when you invoke any sub-command with `--help`: + + diff --git a/book/src/reference/atticd-cli.md b/book/src/reference/atticd-cli.md new file mode 100644 index 0000000..ff4f071 --- /dev/null +++ b/book/src/reference/atticd-cli.md @@ -0,0 +1,11 @@ +# `atticd` CLI + +The following are the help messages that will be printed when you invoke any sub-command with `--help`: + + diff --git a/book/src/tutorial.md b/book/src/tutorial.md new file mode 100644 index 0000000..b39e0ce --- /dev/null +++ b/book/src/tutorial.md @@ -0,0 +1,204 @@ +# Tutorial + +Let's spin up Attic in just 15 minutes (yes, it works on macOS too!): + +```bash +nix-shell https://github.com/zhaofengli/attic/tarball/main -A demo +``` + +Simply run `atticd` to start the server in monolithic mode with a SQLite database and local storage: + +```console +$ atticd +Attic Server 0.1.0 (release) + +----------------- +Welcome to Attic! + +A simple setup using SQLite and local storage has been configured for you in: + + /home/zhaofeng/.config/attic/server.toml + +Run the following command to log into this server: + + attic login local http://localhost:8080 eyJ... + +Documentations and guides: + + https://docs.attic.rs + +Enjoy! +----------------- + +Running migrations... +Starting API server... +Listening on [::]:8080... +``` + +## Cache Creation + +`atticd` is the server, and `attic` is the client. +We can now log in and create a cache: + +```console +# Copy and paste from the atticd output +$ attic login local http://localhost:8080 eyJ... +✍️ Configuring server "local" + +$ attic cache create hello +✨ Created cache "hello" on "local" +``` + +## Pushing + +Let's push `attic` itself to the cache: + +```console +$ attic push hello $(which attic) +⚙️ Pushing 1 paths to "hello" on "local" (0 already cached, 45 in upstream)... +✅ r5d7217c0rjd5iiz1g2nhvd15frck9x2-attic-0.1.0 (52.89 MiB/s) +``` + +The interesting thing is that `attic` automatically skipped over store paths cached by `cache.nixos.org`! +This behavior can be configured on a per-cache basis. + +Note that Attic performs content-addressed global deduplication, so when you upload the same store path to another cache, the underlying NAR is only stored once. +Each cache is essentially a restricted view of the global cache. + +## Pulling + +Now, let's pull it back from the cache. 
+For demonstration purposes, let's use `--store` to make Nix download to another directory because Attic already exists in `/nix/store`: + +```console +# Automatically configures ~/.config/nix/nix.conf for you +$ attic use hello +Configuring Nix to use "hello" on "local": ++ Substituter: http://localhost:8080/hello ++ Trusted Public Key: hello:vlsd7ZHIXNnKXEQShVnd7erE8zcuSKrBWRpV6zTibnA= ++ Access Token + +$ nix-store --store $PWD/nix-demo -r $(which attic) +[snip] +copying path '/nix/store/r5d7217c0rjd5iiz1g2nhvd15frck9x2-attic-0.1.0' from 'http://localhost:8080/hello'... +warning: you did not specify '--add-root'; the result might be removed by the garbage collector +/nix/store/r5d7217c0rjd5iiz1g2nhvd15frck9x2-attic-0.1.0 + +$ ls nix-demo/nix/store/r5d7217c0rjd5iiz1g2nhvd15frck9x2-attic-0.1.0/bin/attic +nix-demo/nix/store/r5d7217c0rjd5iiz1g2nhvd15frck9x2-attic-0.1.0/bin/attic +``` + +Note that to pull into the actual Nix Store, your user must be considered [trusted](https://nixos.org/manual/nix/stable/command-ref/conf-file.html#conf-trusted-users) by the `nix-daemon`. + +## Access Control + +Attic performs stateless authentication using signed JWT tokens which contain permissions. +The root token printed out by `atticd` is all-powerful and should not be shared. + +Let's create another token that can only access the `hello` cache: + +```console +$ atticadm make-token --sub alice --validity '3 months' --pull hello --push hello +eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJhbGljZSIsImV4cCI6MTY4MDI5MzMzOSwiaHR0cHM6Ly9qd3QuYXR0aWMucnMvdjEiOnsiY2FjaGVzIjp7ImhlbGxvIjp7InIiOjEsInciOjF9fX19.XJsaVfjrX5l7p9z76836KXP6Vixn41QJUfxjiK7D-LM +``` + +Let's say Alice wants to have her own caches. +Instead of creating caches for her, we can let her do it herself: + +```console +$ atticadm make-token --sub alice --validity '3 months' --pull 'alice-*' --push 'alice-*' --create-cache 'alice-*' +eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJhbGljZSIsImV4cCI6MTY4MDI5MzQyNSwiaHR0cHM6Ly9qd3QuYXR0aWMucnMvdjEiOnsiY2FjaGVzIjp7ImFsaWNlLSoiOnsiciI6MSwidyI6MSwiY2MiOjF9fX19.MkSnK6yGDWYUVnYiJF3tQgdTlqstfWlbziFWUr-lKUk +``` + +Now Alice can use this token to _create_ any cache beginning with `alice-` and push to them. +Try passing `--dump-claims` to show the JWT claims without encoding the token to see what's going on. + +## Going Public + +Let's make the cache public. Making it public gives unauthenticated users pull access: + +```console +$ attic cache configure hello --public +✅ Configured "hello" on "local" + +# Now we can query the cache without being authenticated +$ curl http://localhost:8080/hello/nix-cache-info +WantMassQuery: 1 +StoreDir: /nix/store +Priority: 41 +``` + +## Garbage Collection + +It's a bad idea to let binary caches grow unbounded. +Let's configure garbage collection on the cache to automatically delete objects that haven't been accessed in a while: + +``` +$ attic cache configure hello --retention-period '1s' +✅ Configured "hello" on "local" +``` + +Now the retention period is only one second. +Instead of waiting for the periodic garbage collection to occur (see `server.toml`), let's trigger it manually: + +```bash +atticd --mode garbage-collector-once +``` + +Now the store path doesn't exist on the cache anymore! 
+
+```console
+$ nix-store --store $PWD/nix-demo-2 -r $(which attic)
+don't know how to build these paths:
+  /nix/store/v660wl07i1lcrrgpr1yspn2va5d1xgjr-attic-0.1.0
+error: build of '/nix/store/v660wl07i1lcrrgpr1yspn2va5d1xgjr-attic-0.1.0' failed
+
+$ curl http://localhost:8080/hello/v660wl07i1lcrrgpr1yspn2va5d1xgjr.narinfo
+{"code":404,"error":"NoSuchObject","message":"The requested object does not exist."}
+```
+
+Let's reset it back to the default, which is to not garbage collect (configure it in `server.toml`):
+
+```console
+$ attic cache configure hello --reset-retention-period
+✅ Configured "hello" on "local"
+
+$ attic cache info hello
+               Public: true
+           Public Key: hello:vlsd7ZHIXNnKXEQShVnd7erE8zcuSKrBWRpV6zTibnA=
+Binary Cache Endpoint: http://localhost:8080/hello
+         API Endpoint: http://localhost:8080/
+      Store Directory: /nix/store
+             Priority: 41
+  Upstream Cache Keys: ["cache.nixos.org-1"]
+     Retention Period: Global Default
+```
+
+Because of Attic's global deduplication, garbage collection actually happens on two levels:
+
+1. **Local Cache**: When an object is garbage collected, only the mapping between the metadata in the local cache and the NAR in the global cache gets deleted. The local cache loses access to the NAR, but the storage isn't freed.
+2. **Global Cache**: Orphan NARs not referenced by any local cache then become eligible for deletion. This time the storage space is actually freed and subsequent uploads of the same NAR will actually trigger an upload to the storage backend.
+
+## Summary
+
+In just a few commands, we have:
+
+1. Set up a new Attic server and a binary cache
+2. Pushed store paths to it
+3. Configured Nix to use the new binary cache
+4. Generated access tokens that provide restricted access
+5. Made the cache public
+6. Performed garbage collection
+
+## What's next
+
+> Note: Attic is an early prototype and everything is subject to change! It may be full of holes and APIs may be changed without backward compatibility. You might even be required to reset the entire database. I would love to have people give it a try, but please keep that in mind :)
+
+For a less temporary setup, you can set up `atticd` with PostgreSQL and S3.
+You should also place it behind a load balancer like NGINX to provide HTTPS.
+Take a look at `~/.config/attic/server.toml` to see what you can configure!
+
+While it's easy to get started by running `atticd` in monolithic mode, for production use it's best to run different components of `atticd` separately with `--mode`:
+
+- `api-server`: Stateless and can be replicated.
+- `garbage-collector`: Performs periodic garbage collection. Cannot be replicated.
diff --git a/book/src/user-guide/README.md b/book/src/user-guide/README.md
new file mode 100644
index 0000000..8ce8720
--- /dev/null
+++ b/book/src/user-guide/README.md
@@ -0,0 +1,40 @@
+# User Guide
+
+## Logging in
+
+You should have received an `attic login` command from an admin like the following:
+
+```
+attic login central https://attic.domain.tld/ eyJ...
+```
+
+The `attic` client can work with multiple servers at the same time.
+To select the `foo` cache from server `central`, use one of the following:
+
+- `foo`, if the `central` server is configured as the default
+- `central:foo`
+
+To configure the default server, set `default-server` in `~/.config/attic/config.toml`, as sketched below.
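+
+For illustration, a minimal `config.toml` along these lines selects `central` as the default; the exact table layout is an assumption here, and the endpoint and token values are placeholders:
+
+```toml
+default-server = "central"
+
+[servers.central]
+endpoint = "https://attic.domain.tld/"
+token = "eyJ..."
+```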
+
+## Enabling a cache
+
+To configure Nix to automatically use cache `foo`:
+
+```
+attic use foo
+```
+
+## Pushing to the cache
+
+To push a store path to cache `foo`:
+
+```bash
+attic push foo /nix/store/...
+```
+
+Other examples include:
+
+```bash
+attic push foo ./result
+attic push foo /run/current-system
+```
diff --git a/client/Cargo.toml b/client/Cargo.toml
new file mode 100644
index 0000000..24166fe
--- /dev/null
+++ b/client/Cargo.toml
@@ -0,0 +1,39 @@
+[package]
+name = "attic-client"
+version = "0.1.0"
+edition = "2021"
+publish = false
+
+[[bin]]
+name = "attic"
+path = "src/main.rs"
+
+[dependencies]
+attic = { path = "../attic" }
+
+anyhow = "1.0.68"
+bytes = "1.3.0"
+clap = { version = "4.0", features = ["derive"] }
+clap_complete = "4.0.2"
+const_format = "0.2.30"
+dialoguer = "0.10.2"
+displaydoc = "0.2.3"
+enum-as-inner = "0.5.1"
+futures = "0.3.25"
+humantime = "2.1.0"
+indicatif = "0.17.2"
+lazy_static = "1.4.0"
+regex = "1.7.0"
+reqwest = { version = "0.11.13", default-features = false, features = ["json", "rustls-tls", "stream"] }
+serde = { version = "1.0.151", features = ["derive"] }
+serde_json = "1.0.91"
+toml = "0.5.10"
+tracing = "0.1.37"
+tracing-subscriber = "0.3.16"
+xdg = "2.4.1"
+
+[dependencies.tokio]
+version = "1.23.0"
+features = [
+    "full"
+]
diff --git a/client/src/api/mod.rs b/client/src/api/mod.rs
new file mode 100644
index 0000000..65a9c49
--- /dev/null
+++ b/client/src/api/mod.rs
@@ -0,0 +1,219 @@
+use std::error::Error as StdError;
+use std::fmt;
+
+use anyhow::Result;
+use bytes::Bytes;
+use const_format::concatcp;
+use displaydoc::Display;
+use futures::TryStream;
+use reqwest::{
+    header::{HeaderMap, HeaderValue, AUTHORIZATION, USER_AGENT},
+    Body, Client as HttpClient, Response, StatusCode, Url,
+};
+use serde::Deserialize;
+
+use crate::config::ServerConfig;
+use crate::version::ATTIC_DISTRIBUTOR;
+use attic::api::v1::cache_config::{CacheConfig, CreateCacheRequest};
+use attic::api::v1::get_missing_paths::{GetMissingPathsRequest, GetMissingPathsResponse};
+use attic::api::v1::upload_path::UploadPathNarInfo;
+use attic::cache::CacheName;
+use attic::nix_store::StorePathHash;
+
+/// The User-Agent string of Attic.
+const ATTIC_USER_AGENT: &str =
+    concatcp!("Attic/{} ({})", env!("CARGO_PKG_NAME"), ATTIC_DISTRIBUTOR);
+
+/// The Attic API client.
+#[derive(Debug, Clone)]
+pub struct ApiClient {
+    /// Base endpoint of the server.
+    endpoint: Url,
+
+    /// An initialized HTTP client.
+    client: HttpClient,
+}
+
+/// An API error.
+#[derive(Debug, Display)]
+pub enum ApiError {
+    /// {0}
+    Structured(StructuredApiError),
+
+    /// HTTP {0}: {1}
+    Unstructured(StatusCode, String),
+}
+
+#[derive(Debug, Clone, Deserialize)]
+pub struct StructuredApiError {
+    #[allow(dead_code)]
+    code: u16,
+    error: String,
+    message: String,
+}
+
+impl ApiClient {
+    pub fn from_server_config(config: ServerConfig) -> Result<Self> {
+        let client = build_http_client(config.token.as_deref());
+
+        Ok(Self {
+            endpoint: Url::parse(&config.endpoint)?,
+            client,
+        })
+    }
+
+    /// Returns the configuration of a cache.
+    pub async fn get_cache_config(&self, cache: &CacheName) -> Result<CacheConfig> {
+        let endpoint = self
+            .endpoint
+            .join("_api/v1/cache-config/")?
+            .join(cache.as_str())?;
+
+        let res = self.client.get(endpoint).send().await?;
+
+        if res.status().is_success() {
+            let cache_config = res.json().await?;
+            Ok(cache_config)
+        } else {
+            let api_error = ApiError::try_from_response(res).await?;
+            Err(api_error.into())
+        }
+    }
+
+    /// Creates a cache.
+ pub async fn create_cache(&self, cache: &CacheName, request: CreateCacheRequest) -> Result<()> { + let endpoint = self + .endpoint + .join("_api/v1/cache-config/")? + .join(cache.as_str())?; + + let res = self.client.post(endpoint).json(&request).send().await?; + + if res.status().is_success() { + Ok(()) + } else { + let api_error = ApiError::try_from_response(res).await?; + Err(api_error.into()) + } + } + + /// Configures a cache. + pub async fn configure_cache(&self, cache: &CacheName, config: &CacheConfig) -> Result<()> { + let endpoint = self + .endpoint + .join("_api/v1/cache-config/")? + .join(cache.as_str())?; + + let res = self.client.patch(endpoint).json(&config).send().await?; + + if res.status().is_success() { + Ok(()) + } else { + let api_error = ApiError::try_from_response(res).await?; + Err(api_error.into()) + } + } + + /// Destroys a cache. + pub async fn destroy_cache(&self, cache: &CacheName) -> Result<()> { + let endpoint = self + .endpoint + .join("_api/v1/cache-config/")? + .join(cache.as_str())?; + + let res = self.client.delete(endpoint).send().await?; + + if res.status().is_success() { + Ok(()) + } else { + let api_error = ApiError::try_from_response(res).await?; + Err(api_error.into()) + } + } + + /// Returns paths missing from a cache. + pub async fn get_missing_paths( + &self, + cache: &CacheName, + store_path_hashes: Vec, + ) -> Result { + let endpoint = self.endpoint.join("_api/v1/get-missing-paths")?; + let payload = GetMissingPathsRequest { + cache: cache.to_owned(), + store_path_hashes, + }; + + let res = self.client.post(endpoint).json(&payload).send().await?; + + if res.status().is_success() { + let cache_config = res.json().await?; + Ok(cache_config) + } else { + let api_error = ApiError::try_from_response(res).await?; + Err(api_error.into()) + } + } + + /// Uploads a path. + pub async fn upload_path(&self, nar_info: UploadPathNarInfo, stream: S) -> Result<()> + where + S: TryStream + Send + Sync + 'static, + S::Error: Into>, + Bytes: From, + { + let endpoint = self.endpoint.join("_api/v1/upload-path")?; + let upload_info_json = serde_json::to_string(&nar_info)?; + + let res = self + .client + .put(endpoint) + .header( + "X-Attic-Nar-Info", + HeaderValue::from_str(&upload_info_json)?, + ) + .header(USER_AGENT, HeaderValue::from_str(ATTIC_USER_AGENT)?) + .body(Body::wrap_stream(stream)) + .send() + .await?; + + if res.status().is_success() { + Ok(()) + } else { + let api_error = ApiError::try_from_response(res).await?; + Err(api_error.into()) + } + } +} + +impl StdError for ApiError {} + +impl ApiError { + async fn try_from_response(response: Response) -> Result { + let status = response.status(); + let text = response.text().await?; + match serde_json::from_str(&text) { + Ok(s) => Ok(Self::Structured(s)), + Err(_) => Ok(Self::Unstructured(status, text)), + } + } +} + +impl fmt::Display for StructuredApiError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}: {}", self.error, self.message) + } +} + +fn build_http_client(token: Option<&str>) -> HttpClient { + let mut headers = HeaderMap::new(); + + if let Some(token) = token { + let auth_header = HeaderValue::from_str(&format!("bearer {}", token)).unwrap(); + headers.insert(AUTHORIZATION, auth_header); + } + + reqwest::Client::builder() + .default_headers(headers) + .build() + .unwrap() +} diff --git a/client/src/cache.rs b/client/src/cache.rs new file mode 100644 index 0000000..f743d83 --- /dev/null +++ b/client/src/cache.rs @@ -0,0 +1,77 @@ +//! 
Client-specific cache references. +//! +//! The Attic client is designed to work with multiple servers. +//! Therefore, users can refer to caches in the following forms: +//! +//! - `cachename`: Will use `cachename` on the default server +//! - `servername:cachename`: Will use `cachename` on server `servername` +//! - `https://cache.server.tld/username`: Will auto-detect +//! - To be implemented + +use std::ops::Deref; +use std::str::FromStr; + +use anyhow::{anyhow, Result}; +use serde::{Deserialize, Serialize}; + +pub use attic::cache::{CacheName, CacheNamePattern}; + +/// A reference to a cache. +#[derive(Debug, Clone)] +pub enum CacheRef { + DefaultServer(CacheName), + ServerQualified(ServerName, CacheName), +} + +/// A server name. +/// +/// It has the same requirements as a cache name. +#[derive(Debug, Clone, Hash, PartialEq, Eq, Deserialize, Serialize)] +#[serde(transparent)] +pub struct ServerName(CacheName); + +impl CacheRef { + fn try_parse_cache(s: &str) -> Option { + let name = CacheName::new(s.to_owned()).ok()?; + Some(Self::DefaultServer(name)) + } + + fn try_parse_server_qualified(s: &str) -> Option { + let (server, cache) = s.split_once(':')?; + let server = CacheName::new(server.to_owned()).ok()?; + let cache = CacheName::new(cache.to_owned()).ok()?; + Some(Self::ServerQualified(ServerName(server), cache)) + } +} + +impl FromStr for CacheRef { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + if let Some(r) = Self::try_parse_cache(s) { + return Ok(r); + } + + if let Some(r) = Self::try_parse_server_qualified(s) { + return Ok(r); + } + + Err(anyhow!("Invalid cache reference")) + } +} + +impl Deref for ServerName { + type Target = CacheName; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl FromStr for ServerName { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + Ok(Self(CacheName::from_str(s)?)) + } +} diff --git a/client/src/cli.rs b/client/src/cli.rs new file mode 100644 index 0000000..db93b49 --- /dev/null +++ b/client/src/cli.rs @@ -0,0 +1,69 @@ +//! Global CLI Setup. + +use std::env; + +use anyhow::{anyhow, Result}; +use clap::{CommandFactory, Parser, Subcommand}; +use clap_complete::Shell; +use enum_as_inner::EnumAsInner; + +use crate::command::cache::{self, Cache}; +use crate::command::get_closure::{self, GetClosure}; +use crate::command::login::{self, Login}; +use crate::command::push::{self, Push}; +use crate::command::r#use::{self, Use}; + +/// Attic binary cache client. +#[derive(Debug, Parser)] +#[clap(version)] +#[clap(propagate_version = true)] +pub struct Opts { + #[clap(subcommand)] + pub command: Command, +} + +#[derive(Debug, Subcommand, EnumAsInner)] +pub enum Command { + Login(Login), + Use(Use), + Push(Push), + Cache(Cache), + + #[clap(hide = true)] + GetClosure(GetClosure), +} + +/// Generate shell autocompletion files. +#[derive(Debug, Parser)] +pub struct GenCompletions { + /// The shell to generate autocompletion files for. 
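+    ///
+    /// This is one of the shells supported by `clap_complete` (e.g., `bash`,
+    /// `zsh` or `fish`).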
+ shell: Shell, +} + +pub async fn run() -> Result<()> { + // https://github.com/clap-rs/clap/issues/1335 + if let Some("gen-completions") = env::args().nth(1).as_deref() { + return gen_completions(env::args().nth(2)).await; + } + + let opts = Opts::parse(); + + match opts.command { + Command::Login(_) => login::run(opts).await, + Command::Use(_) => r#use::run(opts).await, + Command::Push(_) => push::run(opts).await, + Command::Cache(_) => cache::run(opts).await, + Command::GetClosure(_) => get_closure::run(opts).await, + } +} + +async fn gen_completions(shell: Option) -> Result<()> { + let shell: Shell = shell + .ok_or_else(|| anyhow!("Must specify a shell."))? + .parse() + .unwrap(); + + clap_complete::generate(shell, &mut Opts::command(), "attic", &mut std::io::stdout()); + + Ok(()) +} diff --git a/client/src/command/cache.rs b/client/src/command/cache.rs new file mode 100644 index 0000000..62e0d86 --- /dev/null +++ b/client/src/command/cache.rs @@ -0,0 +1,330 @@ +use anyhow::{anyhow, Result}; +use clap::{Parser, Subcommand}; +use dialoguer::Input; +use humantime::Duration; + +use crate::api::ApiClient; +use crate::cache::CacheRef; +use crate::cli::Opts; +use crate::config::Config; +use attic::api::v1::cache_config::{ + CacheConfig, CreateCacheRequest, KeypairConfig, RetentionPeriodConfig, +}; + +/// Manage caches on an Attic server. +#[derive(Debug, Parser)] +pub struct Cache { + #[clap(subcommand)] + command: Command, +} + +#[derive(Debug, Subcommand)] +enum Command { + Create(Create), + Configure(Configure), + Destroy(Destroy), + Info(Info), +} + +/// Create a cache. +/// +/// You need the `create_cache` permission on the cache that +/// you are creating. +#[derive(Debug, Clone, Parser)] +struct Create { + /// Name of the cache to create. + cache: CacheRef, + + /// Make the cache public. + /// + /// Public caches can be pulled from by anyone without + /// a token. Only those with the `push` permission can push. + /// + /// By default, caches are private. + #[clap(long)] + public: bool, + + /// The Nix store path this binary cache uses. + /// + /// You probably don't want to change this. Changing + /// this can make your cache unusable. + #[clap(long, hide = true, default_value = "/nix/store")] + store_dir: String, + + /// The priority of the binary cache. + /// + /// A lower number denotes a higher priority. + /// has a priority of 40. + #[clap(long, default_value = "41")] + priority: i32, + + /// The signing key name of an upstream cache. + /// + /// When pushing to the cache, paths signed with this key + /// will be skipped by default. Specify this flag multiple + /// times to add multiple key names. + #[clap( + name = "NAME", + long = "upstream-cache-key-name", + default_value = "cache.nixos.org-1" + )] + upstream_cache_key_names: Vec, +} + +/// Configure a cache. +/// +/// You need the `configure_cache` permission on the cache that +/// you are configuring. +#[derive(Debug, Clone, Parser)] +struct Configure { + /// Name of the cache to configure. + cache: CacheRef, + + /// Regenerate the signing keypair. + /// + /// The server-side signing key will be regenerated and + /// all users will need to configure the new signing key + /// in `nix.conf`. + #[clap(long)] + regenerate_keypair: bool, + + /// Make the cache public. + /// + /// Use `--private` to make it private. + #[clap(long)] + public: bool, + + /// Make the cache private. + /// + /// Use `--public` to make it public. + #[clap(long)] + private: bool, + + /// The Nix store path this binary cache uses. 
+ /// + /// You probably don't want to change this. Changing + /// this can make your cache unusable. + #[clap(long, hide = true)] + store_dir: Option, + + /// The priority of the binary cache. + /// + /// A lower number denotes a higher priority. + /// has a priority of 40. + #[clap(long)] + priority: Option, + + /// The signing key name of an upstream cache. + /// + /// When pushing to the cache, paths signed with this key + /// will be skipped by default. Specify this flag multiple + /// times to add multiple key names. + #[clap(value_name = "NAME", long = "upstream-cache-key-name")] + upstream_cache_key_names: Option>, + + /// Set the retention period of the cache. + /// + /// You can use expressions like "2 years", "3 months" + /// and "1y". + #[clap(long, value_name = "PERIOD")] + retention_period: Option, + + /// Reset the retention period of the cache to global default. + #[clap(long)] + reset_retention_period: bool, +} + +/// Destroy a cache. +/// +/// Destroying a cache causes it to become unavailable but the +/// underlying data may not be deleted immediately. Depending +/// on the server configuration, you may or may not be able to +/// create the cache of the same name. +/// +/// You need the `destroy_cache` permission on the cache that +/// you are destroying. +#[derive(Debug, Clone, Parser)] +struct Destroy { + /// Name of the cache to destroy. + cache: CacheRef, + + /// Don't ask for interactive confirmation. + #[clap(long)] + no_confirm: bool, +} + +/// Show the current configuration of a cache. +#[derive(Debug, Clone, Parser)] +struct Info { + /// Name of the cache to query. + cache: CacheRef, +} + +pub async fn run(opts: Opts) -> Result<()> { + let sub = opts.command.as_cache().unwrap(); + match &sub.command { + Command::Create(sub) => create_cache(sub.to_owned()).await, + Command::Configure(sub) => configure_cache(sub.to_owned()).await, + Command::Destroy(sub) => destroy_cache(sub.to_owned()).await, + Command::Info(sub) => show_cache_config(sub.to_owned()).await, + } +} + +async fn create_cache(sub: Create) -> Result<()> { + let config = Config::load()?; + + let (server_name, server, cache) = config.resolve_cache(&sub.cache)?; + let api = ApiClient::from_server_config(server.clone())?; + + let request = CreateCacheRequest { + // TODO: Make this configurable? + keypair: KeypairConfig::Generate, + is_public: sub.public, + priority: sub.priority, + store_dir: sub.store_dir, + upstream_cache_key_names: sub.upstream_cache_key_names, + }; + + api.create_cache(cache, request).await?; + eprintln!( + "✨ Created cache \"{}\" on \"{}\"", + cache.as_str(), + server_name.as_str() + ); + + Ok(()) +} + +async fn configure_cache(sub: Configure) -> Result<()> { + let config = Config::load()?; + + let (server_name, server, cache) = config.resolve_cache(&sub.cache)?; + let mut patch = CacheConfig::blank(); + + if sub.public && sub.private { + return Err(anyhow!( + "`--public` and `--private` cannot be set at the same time." + )); + } + + if sub.retention_period.is_some() && sub.reset_retention_period { + return Err(anyhow!( + "`--retention-period` and `--reset-retention-period` cannot be set at the same time." 
+        ));
+    }
+
+    if sub.public {
+        patch.is_public = Some(true);
+    } else if sub.private {
+        patch.is_public = Some(false);
+    }
+
+    if let Some(period) = sub.retention_period {
+        patch.retention_period = Some(RetentionPeriodConfig::Period(period.as_secs() as u32));
+    } else if sub.reset_retention_period {
+        patch.retention_period = Some(RetentionPeriodConfig::Global);
+    }
+
+    if sub.regenerate_keypair {
+        patch.keypair = Some(KeypairConfig::Generate);
+    }
+
+    patch.store_dir = sub.store_dir;
+    patch.priority = sub.priority;
+    patch.upstream_cache_key_names = sub.upstream_cache_key_names;
+
+    let api = ApiClient::from_server_config(server.clone())?;
+    api.configure_cache(cache, &patch).await?;
+
+    eprintln!(
+        "✅ Configured \"{}\" on \"{}\"",
+        cache.as_str(),
+        server_name.as_str()
+    );
+
+    Ok(())
+}
+
+async fn destroy_cache(sub: Destroy) -> Result<()> {
+    let config = Config::load()?;
+
+    let (server_name, server, cache) = config.resolve_cache(&sub.cache)?;
+
+    if !sub.no_confirm {
+        eprintln!("When you destroy a cache:");
+        eprintln!();
+        eprintln!("1. Everyone will immediately lose access.");
+        eprintln!("2. The underlying data won't be deleted immediately.");
+        eprintln!("3. You may not be able to create a cache of the same name.");
+        eprintln!();
+
+        let answer: String = Input::new()
+            .with_prompt(format!(
+                "⚠️ Type the cache name to confirm destroying \"{}\" on \"{}\"",
+                cache.as_str(),
+                server_name.as_str()
+            ))
+            .allow_empty(true)
+            .interact()?;
+
+        if answer != cache.as_str() {
+            return Err(anyhow!("Incorrect answer. Aborting..."));
+        }
+    }
+
+    let api = ApiClient::from_server_config(server.clone())?;
+    api.destroy_cache(cache).await?;
+
+    eprintln!("🗑️ The cache was destroyed.");
+
+    Ok(())
+}
+
+async fn show_cache_config(sub: Info) -> Result<()> {
+    let config = Config::load()?;
+
+    let (_, server, cache) = config.resolve_cache(&sub.cache)?;
+    let api = ApiClient::from_server_config(server.clone())?;
+    let cache_config = api.get_cache_config(cache).await?;
+
+    if let Some(is_public) = cache_config.is_public {
+        eprintln!("               Public: {}", is_public);
+    }
+
+    if let Some(public_key) = cache_config.public_key {
+        eprintln!("           Public Key: {}", public_key);
+    }
+
+    if let Some(substituter_endpoint) = cache_config.substituter_endpoint {
+        eprintln!("Binary Cache Endpoint: {}", substituter_endpoint);
+    }
+
+    if let Some(api_endpoint) = cache_config.api_endpoint {
+        eprintln!("         API Endpoint: {}", api_endpoint);
+    }
+
+    if let Some(store_dir) = cache_config.store_dir {
+        eprintln!("      Store Directory: {}", store_dir);
+    }
+
+    if let Some(priority) = cache_config.priority {
+        eprintln!("             Priority: {}", priority);
+    }
+
+    if let Some(upstream_cache_key_names) = cache_config.upstream_cache_key_names {
+        eprintln!("  Upstream Cache Keys: {:?}", upstream_cache_key_names);
+    }
+
+    if let Some(retention_period) = cache_config.retention_period {
+        match retention_period {
+            RetentionPeriodConfig::Period(period) => {
+                eprintln!("     Retention Period: {:?}", period);
+            }
+            RetentionPeriodConfig::Global => {
+                eprintln!("     Retention Period: Global Default");
+            }
+        }
+    }
+
+    Ok(())
+}
diff --git a/client/src/command/get_closure.rs b/client/src/command/get_closure.rs
new file mode 100644
index 0000000..2efce91
--- /dev/null
+++ b/client/src/command/get_closure.rs
@@ -0,0 +1,31 @@
+use std::path::PathBuf;
+
+use anyhow::Result;
+use clap::Parser;
+
+use crate::cli::Opts;
+use attic::nix_store::NixStore;
+
+/// Returns the closure of a store path (test).
+///
+/// This is similar to `nix-store -qR`.
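+///
+/// A hypothetical invocation (a sketch; the subcommand is hidden from `--help`):
+///
+/// ```console
+/// $ attic get-closure /run/current-system
+/// /nix/store/...
+/// ```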
+#[derive(Debug, Parser)] +pub struct GetClosure { + store_path: PathBuf, +} + +pub async fn run(opts: Opts) -> Result<()> { + let sub = opts.command.as_get_closure().unwrap(); + + let store = NixStore::connect()?; + let store_path = store.follow_store_path(&sub.store_path)?; + let closure = store + .compute_fs_closure(store_path, false, false, false) + .await?; + + for path in &closure { + println!("{}", store.get_full_path(path).to_str().unwrap()); + } + + Ok(()) +} diff --git a/client/src/command/login.rs b/client/src/command/login.rs new file mode 100644 index 0000000..4514c10 --- /dev/null +++ b/client/src/command/login.rs @@ -0,0 +1,48 @@ +use anyhow::Result; +use clap::Parser; + +use crate::cache::ServerName; +use crate::cli::Opts; +use crate::config::{Config, ServerConfig}; + +/// Log into an Attic server. +#[derive(Debug, Parser)] +pub struct Login { + /// Name of the server. + name: ServerName, + + /// Endpoint of the server. + endpoint: String, + + /// Access token. + token: Option, +} + +pub async fn run(opts: Opts) -> Result<()> { + let sub = opts.command.as_login().unwrap(); + let mut config = Config::load()?; + let mut config_m = config.as_mut(); + + if let Some(server) = config_m.servers.get_mut(&sub.name) { + eprintln!("✍️ Overwriting server \"{}\"", sub.name.as_str()); + + server.endpoint = sub.endpoint.to_owned(); + server.token = sub.token.to_owned(); + } else { + eprintln!("✍️ Configuring server \"{}\"", sub.name.as_str()); + + config_m.servers.insert( + sub.name.to_owned(), + ServerConfig { + endpoint: sub.endpoint.to_owned(), + token: sub.token.to_owned(), + }, + ); + } + + if config_m.servers.len() == 1 { + config_m.default_server = Some(sub.name.to_owned()); + } + + Ok(()) +} diff --git a/client/src/command/mod.rs b/client/src/command/mod.rs new file mode 100644 index 0000000..c183fa9 --- /dev/null +++ b/client/src/command/mod.rs @@ -0,0 +1,5 @@ +pub mod cache; +pub mod get_closure; +pub mod login; +pub mod push; +pub mod r#use; diff --git a/client/src/command/push.rs b/client/src/command/push.rs new file mode 100644 index 0000000..5649914 --- /dev/null +++ b/client/src/command/push.rs @@ -0,0 +1,355 @@ +use std::collections::{HashMap, HashSet}; +use std::fmt::Write; +use std::path::PathBuf; +use std::pin::Pin; +use std::sync::Arc; +use std::task::{Context, Poll}; +use std::time::{Duration, Instant}; + +use anyhow::{anyhow, Result}; +use clap::Parser; +use futures::future::join_all; +use futures::stream::Stream; +use indicatif::{HumanBytes, MultiProgress, ProgressBar, ProgressState, ProgressStyle}; +use tokio::sync::Semaphore; + +use crate::api::ApiClient; +use crate::cache::{CacheName, CacheRef}; +use crate::cli::Opts; +use crate::config::Config; +use attic::api::v1::upload_path::UploadPathNarInfo; +use attic::error::AtticResult; +use attic::nix_store::{NixStore, StorePath, StorePathHash, ValidPathInfo}; + +/// Push closures to a binary cache. +#[derive(Debug, Parser)] +pub struct Push { + /// The cache to push to. + cache: CacheRef, + + /// The store paths to push. + paths: Vec, + + /// Push the specified paths only and do not compute closures. + #[clap(long)] + no_closure: bool, + + /// Ignore the upstream cache filter. + #[clap(long)] + ignore_upstream_cache_filter: bool, +} + +struct PushPlan { + /// Store paths to push. + store_path_map: HashMap, + + /// The number of paths in the original full closure. + num_all_paths: usize, + + /// Number of paths that have been filtered out because they are already cached. 
+ num_already_cached: usize, + + /// Number of paths that have been filtered out because they are signed by an upstream cache. + num_upstream: usize, +} + +/// Wrapper to update a progress bar as a NAR is streamed. +struct NarStreamProgress { + stream: S, + bar: ProgressBar, +} + +/// Uploads a single path to a cache. +pub async fn upload_path( + store: Arc, + path_info: ValidPathInfo, + api: ApiClient, + cache: &CacheName, + mp: MultiProgress, +) -> Result<()> { + let path = &path_info.path; + let upload_info = { + let full_path = store + .get_full_path(path) + .to_str() + .ok_or_else(|| anyhow!("Path contains non-UTF-8"))? + .to_string(); + + let references = path_info + .references + .into_iter() + .map(|pb| { + pb.to_str() + .ok_or_else(|| anyhow!("Reference contains non-UTF-8")) + .map(|s| s.to_owned()) + }) + .collect::, anyhow::Error>>()?; + + UploadPathNarInfo { + cache: cache.to_owned(), + store_path_hash: path.to_hash(), + store_path: full_path, + references, + system: None, // TODO + deriver: None, // TODO + sigs: path_info.sigs, + ca: path_info.ca, + nar_hash: path_info.nar_hash.to_owned(), + nar_size: path_info.nar_size as usize, + } + }; + + let template = format!( + "{{spinner}} {: <20.20} {{bar:40.green/blue}} {{human_bytes:10}} ({{average_speed}})", + path.name(), + ); + let style = ProgressStyle::with_template(&template) + .unwrap() + .tick_chars("🕛🕐🕑🕒🕓🕔🕕🕖🕗🕘🕙🕚✅") + .progress_chars("██ ") + .with_key("human_bytes", |state: &ProgressState, w: &mut dyn Write| { + write!(w, "{}", HumanBytes(state.pos())).unwrap(); + }) + // Adapted from + // + .with_key( + "average_speed", + |state: &ProgressState, w: &mut dyn Write| match (state.pos(), state.elapsed()) { + (pos, elapsed) if elapsed > Duration::ZERO => { + write!(w, "{}", average_speed(pos, elapsed)).unwrap(); + } + _ => write!(w, "-").unwrap(), + }, + ); + let bar = mp.add(ProgressBar::new(path_info.nar_size)); + bar.set_style(style); + let nar_stream = NarStreamProgress::new(store.nar_from_path(path.to_owned()), bar.clone()); + + let start = Instant::now(); + match api.upload_path(upload_info, nar_stream).await { + Ok(_) => { + let elapsed = start.elapsed(); + let seconds = elapsed.as_secs_f64(); + let speed = (path_info.nar_size as f64 / seconds) as u64; + + mp.suspend(|| { + eprintln!( + "✅ {} ({}/s)", + path.as_os_str().to_string_lossy(), + HumanBytes(speed) + ); + }); + bar.finish_and_clear(); + Ok(()) + } + Err(e) => { + mp.suspend(|| { + eprintln!("❌ {}: {}", path.as_os_str().to_string_lossy(), e); + }); + bar.finish_and_clear(); + Err(e) + } + } +} + +pub async fn run(opts: Opts) -> Result<()> { + let sub = opts.command.as_push().unwrap(); + let config = Config::load()?; + + let store = Arc::new(NixStore::connect()?); + let roots = sub + .paths + .clone() + .into_iter() + .map(|p| store.follow_store_path(&p)) + .collect::, _>>()?; + + let (server_name, server, cache) = config.resolve_cache(&sub.cache)?; + + let api = ApiClient::from_server_config(server.clone())?; + let plan = PushPlan::plan( + store.clone(), + &api, + cache, + roots, + sub.no_closure, + sub.ignore_upstream_cache_filter, + ) + .await?; + + if plan.store_path_map.is_empty() { + if plan.num_all_paths == 0 { + eprintln!("🤷 Nothing selected."); + } else { + eprintln!( + "✅ All done! 
({num_already_cached} already cached, {num_upstream} in upstream)", + num_already_cached = plan.num_already_cached, + num_upstream = plan.num_upstream, + ); + } + + return Ok(()); + } else { + eprintln!("⚙️ Pushing {num_missing_paths} paths to \"{cache}\" on \"{server}\" ({num_already_cached} already cached, {num_upstream} in upstream)...", + cache = cache.as_str(), + server = server_name.as_str(), + num_missing_paths = plan.store_path_map.len(), + num_already_cached = plan.num_already_cached, + num_upstream = plan.num_upstream, + ); + } + + let mp = MultiProgress::new(); + let upload_limit = Arc::new(Semaphore::new(10)); // FIXME + let futures = plan + .store_path_map + .into_iter() + .map(|(_, path_info)| { + let store = store.clone(); + let api = api.clone(); + let mp = mp.clone(); + let upload_limit = upload_limit.clone(); + + async move { + let permit = upload_limit.acquire().await?; + + upload_path(store.clone(), path_info, api, cache, mp.clone()).await?; + + drop(permit); + Ok::<(), anyhow::Error>(()) + } + }) + .collect::>(); + + futures::future::join_all(futures) + .await + .into_iter() + .collect::>>()?; + + Ok(()) +} + +impl PushPlan { + /// Creates a plan. + async fn plan( + store: Arc, + api: &ApiClient, + cache: &CacheName, + roots: Vec, + no_closure: bool, + ignore_upstream_filter: bool, + ) -> Result { + // Compute closure + let closure = if no_closure { + roots + } else { + store + .compute_fs_closure_multi(roots, false, false, false) + .await? + }; + + let mut store_path_map: HashMap = { + let futures = closure + .iter() + .map(|path| { + let store = store.clone(); + let path = path.clone(); + let path_hash = path.to_hash(); + + async move { + let path_info = store.query_path_info(path).await?; + Ok((path_hash, path_info)) + } + }) + .collect::>(); + + join_all(futures).await.into_iter().collect::>()? 
+ }; + + let num_all_paths = store_path_map.len(); + if store_path_map.is_empty() { + return Ok(Self { + store_path_map, + num_all_paths, + num_already_cached: 0, + num_upstream: 0, + }); + } + + // Confirm remote cache validity, query cache config + let cache_config = api.get_cache_config(cache).await?; + + if !ignore_upstream_filter { + // Filter out paths signed by upstream caches + let upstream_cache_key_names = + cache_config.upstream_cache_key_names.unwrap_or_default(); + store_path_map.retain(|_, pi| { + for sig in &pi.sigs { + if let Some((name, _)) = sig.split_once(':') { + if upstream_cache_key_names.iter().any(|u| name == u) { + return false; + } + } + } + + true + }); + } + + let num_filtered_paths = store_path_map.len(); + if store_path_map.is_empty() { + return Ok(Self { + store_path_map, + num_all_paths, + num_already_cached: 0, + num_upstream: num_all_paths - num_filtered_paths, + }); + } + + // Query missing paths + let missing_path_hashes: HashSet = { + let store_path_hashes = store_path_map.keys().map(|sph| sph.to_owned()).collect(); + let res = api.get_missing_paths(cache, store_path_hashes).await?; + res.missing_paths.into_iter().collect() + }; + store_path_map.retain(|sph, _| missing_path_hashes.contains(sph)); + let num_missing_paths = store_path_map.len(); + + Ok(Self { + store_path_map, + num_all_paths, + num_already_cached: num_filtered_paths - num_missing_paths, + num_upstream: num_all_paths - num_filtered_paths, + }) + } +} + +impl>>> NarStreamProgress { + fn new(stream: S, bar: ProgressBar) -> Self { + Self { stream, bar } + } +} + +impl>> + Unpin> Stream for NarStreamProgress { + type Item = AtticResult>; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + match Pin::new(&mut self.stream).as_mut().poll_next(cx) { + Poll::Ready(Some(data)) => { + if let Ok(data) = &data { + self.bar.inc(data.len() as u64); + } + + Poll::Ready(Some(data)) + } + other => other, + } + } +} + +// Just the average, no fancy sliding windows that cause wild fluctuations +// +fn average_speed(bytes: u64, duration: Duration) -> String { + let speed = bytes as f64 * 1000_f64 / duration.as_millis() as f64; + format!("{}/s", HumanBytes(speed as u64)) +} diff --git a/client/src/command/use.rs b/client/src/command/use.rs new file mode 100644 index 0000000..0520943 --- /dev/null +++ b/client/src/command/use.rs @@ -0,0 +1,68 @@ +use anyhow::{anyhow, Result}; +use clap::Parser; +use reqwest::Url; + +use crate::api::ApiClient; +use crate::cache::CacheRef; +use crate::cli::Opts; +use crate::config::Config; +use crate::nix_config::NixConfig; +use crate::nix_netrc::NixNetrc; + +/// Configure Nix to use a binary cache. +#[derive(Debug, Parser)] +pub struct Use { + /// The cache to configure. + cache: CacheRef, +} + +pub async fn run(opts: Opts) -> Result<()> { + let sub = opts.command.as_use().unwrap(); + let config = Config::load()?; + + let (server_name, server, cache) = config.resolve_cache(&sub.cache)?; + + let api = ApiClient::from_server_config(server.clone())?; + let cache_config = api.get_cache_config(cache).await?; + + let substituter = cache_config + .substituter_endpoint + .ok_or_else(|| anyhow!("The server did not tell us where the binary cache endpoint is."))?; + let public_key = cache_config.public_key + .ok_or_else(|| anyhow!("The server did not tell us which public key it uses. 
Is signing managed by the client?"))?; + + eprintln!( + "Configuring Nix to use \"{cache}\" on \"{server_name}\":", + cache = cache.as_str(), + server_name = server_name.as_str(), + ); + + // Modify nix.conf + eprintln!("+ Substituter: {}", substituter); + eprintln!("+ Trusted Public Key: {}", public_key); + + let mut nix_config = NixConfig::load().await?; + nix_config.add_substituter(&substituter); + nix_config.add_trusted_public_key(&public_key); + + // Modify netrc + if let Some(token) = &server.token { + eprintln!("+ Access Token"); + + let mut nix_netrc = NixNetrc::load().await?; + let host = Url::parse(&server.endpoint)? + .host() + .map(|h| h.to_string()) + .unwrap(); + nix_netrc.add_token(host, token.to_string()); + nix_netrc.save().await?; + + let netrc_path = nix_netrc.path().unwrap().to_str().unwrap(); + + nix_config.set_netrc_file(netrc_path); + } + + nix_config.save().await?; + + Ok(()) +} diff --git a/client/src/config.rs b/client/src/config.rs new file mode 100644 index 0000000..75d4ad4 --- /dev/null +++ b/client/src/config.rs @@ -0,0 +1,182 @@ +//! Client configurations. +//! +//! Configuration files are stored under `$XDG_CONFIG_HOME/attic/config.toml`. +//! We automatically write modified configurations back for a good end-user +//! experience (e.g., `attic login`). + +use std::collections::HashMap; +use std::fs; +use std::ops::{Deref, DerefMut}; +use std::path::PathBuf; + +use anyhow::{anyhow, Result}; +use serde::{Deserialize, Serialize}; +use xdg::BaseDirectories; + +use crate::cache::{CacheName, CacheRef, ServerName}; + +/// Application prefix in XDG base directories. +/// +/// This will be concatenated into `$XDG_CONFIG_HOME/attic`. +const XDG_PREFIX: &str = "attic"; + +/// Configuration loader. +#[derive(Debug)] +pub struct Config { + /// Actual configuration data. + data: ConfigData, + + /// Path to write modified configurations back to. + path: Option, +} + +/// Client configurations. +#[derive(Debug, Clone, Deserialize, Serialize)] +pub struct ConfigData { + /// The default server to connect to. + #[serde(rename = "default-server")] + pub default_server: Option, + + /// A set of remote servers and access credentials. + #[serde(default = "HashMap::new")] + #[serde(skip_serializing_if = "HashMap::is_empty")] + pub servers: HashMap, +} + +/// Configuration of a server. +#[derive(Debug, Clone, Deserialize, Serialize)] +pub struct ServerConfig { + pub endpoint: String, + pub token: Option, +} + +/// Wrapper that automatically saves the config once dropped. +pub struct ConfigWriteGuard<'a>(&'a mut Config); + +impl Config { + /// Loads the configuration from the system. + pub fn load() -> Result { + let path = get_config_path() + .map_err(|e| { + tracing::warn!("Could not get config path: {}", e); + e + }) + .ok(); + + let data = ConfigData::load_from_path(path.as_ref())?; + + Ok(Self { data, path }) + } + + /// Returns a mutable reference to the configuration. + pub fn as_mut(&mut self) -> ConfigWriteGuard { + ConfigWriteGuard(self) + } + + /// Saves the configuration back to the system, if possible. 
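+    ///
+    /// Callers rarely need to invoke this directly: `ConfigWriteGuard`
+    /// (returned by [`Config::as_mut`]) calls it when dropped. A sketch:
+    ///
+    /// ```ignore
+    /// let mut config = Config::load()?;
+    /// // `new_default` is a placeholder `ServerName`.
+    /// config.as_mut().default_server = Some(new_default);
+    /// // The modified configuration is saved when the guard drops here.
+    /// ```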
+ pub fn save(&self) -> Result<()> { + if let Some(path) = &self.path { + let serialized = toml::to_string(&self.data)?; + fs::write(path, serialized.as_bytes())?; + + tracing::debug!("Saved modified configuration to {:?}", path); + } + + Ok(()) + } +} + +impl Deref for Config { + type Target = ConfigData; + + fn deref(&self) -> &Self::Target { + &self.data + } +} + +impl ConfigData { + fn load_from_path(path: Option<&PathBuf>) -> Result { + if let Some(path) = path { + if path.exists() { + let contents = fs::read(path)?; + let data = toml::from_slice(&contents)?; + return Ok(data); + } + } + + Ok(ConfigData::default()) + } + + pub fn default_server(&self) -> Result<(&ServerName, &ServerConfig)> { + if let Some(name) = &self.default_server { + let config = self.servers.get(name).ok_or_else(|| { + anyhow!( + "Configured default server \"{}\" does not exist", + name.as_str() + ) + })?; + Ok((name, config)) + } else if let Some((name, config)) = self.servers.iter().next() { + Ok((name, config)) + } else { + Err(anyhow!("No servers are available.")) + } + } + + pub fn resolve_cache<'a>( + &'a self, + r: &'a CacheRef, + ) -> Result<(&'a ServerName, &'a ServerConfig, &'a CacheName)> { + match r { + CacheRef::DefaultServer(cache) => { + let (name, config) = self.default_server()?; + Ok((name, config, cache)) + } + CacheRef::ServerQualified(server, cache) => { + let config = self + .servers + .get(server) + .ok_or_else(|| anyhow!("Server \"{}\" does not exist", server.as_str()))?; + Ok((server, config, cache)) + } + } + } +} + +impl Default for ConfigData { + fn default() -> Self { + Self { + default_server: None, + servers: HashMap::new(), + } + } +} + +impl<'a> Deref for ConfigWriteGuard<'a> { + type Target = ConfigData; + + fn deref(&self) -> &Self::Target { + &self.0.data + } +} + +impl<'a> DerefMut for ConfigWriteGuard<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0.data + } +} + +impl<'a> Drop for ConfigWriteGuard<'a> { + fn drop(&mut self) { + if let Err(e) = self.0.save() { + tracing::error!("Could not save modified configuration: {}", e); + } + } +} + +fn get_config_path() -> Result { + let xdg_dirs = BaseDirectories::with_prefix(XDG_PREFIX)?; + let config_path = xdg_dirs.place_config_file("config.toml")?; + + Ok(config_path) +} diff --git a/client/src/main.rs b/client/src/main.rs new file mode 100644 index 0000000..1eb4765 --- /dev/null +++ b/client/src/main.rs @@ -0,0 +1,36 @@ +#![deny( + asm_sub_register, + deprecated, + missing_abi, + unsafe_code, + unused_macros, + unused_must_use, + unused_unsafe +)] +#![deny(clippy::from_over_into, clippy::needless_question_mark)] +#![cfg_attr( + not(debug_assertions), + deny(unused_imports, unused_mut, unused_variables,) +)] + +mod api; +mod cache; +mod cli; +mod command; +mod config; +mod nix_config; +mod nix_netrc; +mod version; + +use anyhow::Result; + +#[tokio::main] +async fn main() -> Result<()> { + init_logging()?; + cli::run().await +} + +fn init_logging() -> Result<()> { + tracing_subscriber::fmt::init(); + Ok(()) +} diff --git a/client/src/nix_config.rs b/client/src/nix_config.rs new file mode 100644 index 0000000..a232eca --- /dev/null +++ b/client/src/nix_config.rs @@ -0,0 +1,265 @@ +//! Nix configuration files. +//! +//! We automatically edit the user's `nix.conf` to add new +//! binary caches while trying to keep the formatting intact. + +use std::path::PathBuf; + +use anyhow::{anyhow, Result}; +use lazy_static::lazy_static; +use regex::Regex; +use tokio::fs; +use xdg::BaseDirectories; + +lazy_static! 
{ + static ref COMMENT_LINE: Regex = { + Regex::new(r"^\s*(#.*)?$").unwrap() + }; + + static ref KV_LINE: Regex = { + // I know what you are thinking, but... + // `key=val` is not valid, and `🔥🔥🔥very=WILD=key🔥🔥🔥 = value` is perfectly valid :) + // Also, despite syntax highlighting of some editors, backslashes do _not_ escape the comment character. + Regex::new(r"^(?P\s*)(?P[^\s]+)(?P\s+)=(?P\s+)(?P[^#]+)(?P#.*)?$").unwrap() + }; +} + +/// The server of cache.nixos.org. +const CACHE_NIXOS_ORG_SUBSTITUTER: &str = "https://cache.nixos.org"; + +/// The public key of cache.nixos.org. +const CACHE_NIXOS_ORG_KEY: &str = "cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="; + +#[derive(Debug)] +pub struct NixConfig { + /// Path to write the modified configuration back to. + path: Option, + + /// Configuration lines. + lines: Vec, +} + +/// A line in the configuration. +#[derive(Debug, Clone, PartialEq, Eq)] +enum Line { + Comment(String), + KV { + key: String, + value: String, + whitespace_s: String, + whitespace_l: String, + whitespace_r: String, + comment: Option, + }, +} + +impl NixConfig { + pub async fn load() -> Result { + let nix_base = BaseDirectories::with_prefix("nix")?; + let path = nix_base.place_config_file("nix.conf")?; + + let lines = if path.exists() { + let content = fs::read_to_string(&path).await?; + Line::from_lines(&content)? + } else { + Vec::new() + }; + + Ok(Self { + path: Some(path), + lines, + }) + } + + /// Saves the modified configuration file. + pub async fn save(&self) -> Result<()> { + if let Some(path) = &self.path { + fs::write(path, self.to_string()).await?; + Ok(()) + } else { + Err(anyhow!("Don't know how to save the nix.conf")) + } + } + + /// Reserialize the configuration back to a string. + pub fn to_string(&self) -> String { + self.lines + .iter() + .map(|l| l.to_string()) + .collect::>() + .join("\n") + } + + /// Adds a new substituter. + pub fn add_substituter(&mut self, substituter: &str) { + self.prepend_to_list("substituters", substituter, CACHE_NIXOS_ORG_SUBSTITUTER); + } + + /// Adds a new trusted public key. + pub fn add_trusted_public_key(&mut self, public_key: &str) { + self.prepend_to_list("trusted-public-keys", public_key, CACHE_NIXOS_ORG_KEY); + } + + /// Sets the netrc-file config. + pub fn set_netrc_file(&mut self, path: &str) { + if let Some(kv) = self.find_key("netrc-file") { + if let Line::KV { ref mut value, .. } = kv { + *value = path.to_string(); + } + } else { + self.lines + .push(Line::kv("netrc-file".to_string(), path.to_string())); + } + } + + fn prepend_to_list(&mut self, key: &str, value: &str, default_tail: &str) { + if let Some(kv) = self.find_key(key) { + if let Line::KV { + value: ref mut list, + .. + } = kv + { + if !list.split(' ').any(|el| el == value) { + *list = format!("{value} {list}"); + } + return; + } + unreachable!(); + } else { + let list = format!("{value} {default_tail}"); + self.lines.push(Line::kv(key.to_string(), list)); + } + } + + fn find_key(&mut self, key: &str) -> Option<&mut Line> { + self.lines.iter_mut().find(|l| { + if let Line::KV { key: k, .. 
} = l { + k == key + } else { + false + } + }) + } +} + +impl Line { + fn from_lines(s: &str) -> Result> { + let mut lines: Vec = Vec::new(); + + for line in s.lines() { + lines.push(Line::from_str(line)?); + } + + Ok(lines) + } + + fn from_str(line: &str) -> Result { + if COMMENT_LINE.is_match(line) { + return Ok(Self::Comment(line.to_string())); + } + + if let Some(matches) = KV_LINE.captures(line) { + return Ok(Self::KV { + key: matches.name("key").unwrap().as_str().to_owned(), + value: matches.name("value").unwrap().as_str().to_owned(), + whitespace_s: matches.name("whitespace_s").unwrap().as_str().to_owned(), + whitespace_l: matches.name("whitespace_l").unwrap().as_str().to_owned(), + whitespace_r: matches.name("whitespace_r").unwrap().as_str().to_owned(), + comment: matches.name("comment").map(|s| s.as_str().to_owned()), + }); + } + + Err(anyhow!("Line \"{}\" isn't valid", line)) + } + + fn to_string(&self) -> String { + match self { + Self::Comment(l) => l.clone(), + Self::KV { + key, + value, + whitespace_s, + whitespace_l, + whitespace_r, + comment, + } => { + let cmt = comment.as_deref().unwrap_or(""); + format!("{whitespace_s}{key}{whitespace_l}={whitespace_r}{value}{cmt}") + } + } + } + + fn kv(key: String, value: String) -> Self { + Self::KV { + key, + value, + whitespace_s: String::new(), + whitespace_l: " ".to_string(), + whitespace_r: " ".to_string(), + comment: None, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_nix_config_parse_line() { + assert_eq!( + Line::from_str("# some comment").unwrap(), + Line::Comment("# some comment".to_string()), + ); + + assert_eq!( + Line::from_str(" # some indented comment").unwrap(), + Line::Comment(" # some indented comment".to_string()), + ); + + assert_eq!( + Line::from_str(" ").unwrap(), + Line::Comment(" ".to_string()), + ); + + assert_eq!( + Line::from_str("key = value").unwrap(), + Line::KV { + key: "key".to_string(), + value: "value".to_string(), + whitespace_s: "".to_string(), + whitespace_l: " ".to_string(), + whitespace_r: " ".to_string(), + comment: None, + } + ); + + assert_eq!( + Line::from_str(" 🔥🔥🔥very=WILD=key🔥🔥🔥 = value = #comment").unwrap(), + Line::KV { + key: "🔥🔥🔥very=WILD=key🔥🔥🔥".to_string(), + value: "value = ".to_string(), + whitespace_s: "\t ".to_string(), + whitespace_l: " ".to_string(), + whitespace_r: "\t".to_string(), + comment: Some("#comment".to_string()), + } + ); + } + + #[test] + fn test_nix_config_line_roundtrip() { + let cases = [ + "# some comment", + " # some indented comment", + " ", + "key = value", + " 🔥🔥🔥very=WILD=key🔥🔥🔥 = value = #comment", + ]; + + for case in cases { + let line = Line::from_str(case).unwrap(); + assert_eq!(case, line.to_string()); + } + } +} diff --git a/client/src/nix_netrc.rs b/client/src/nix_netrc.rs new file mode 100644 index 0000000..b4996a7 --- /dev/null +++ b/client/src/nix_netrc.rs @@ -0,0 +1,248 @@ +//! Nix netrc files. +//! +//! We automatically edit the user's `netrc` to add cache server +//! tokens. +//! +//! This is a very naive implementation. The whole thing should be +//! refactored to be cleaner and operate on streams. + +use std::collections::HashMap; +use std::fmt; +use std::path::{Path, PathBuf}; + +use anyhow::{anyhow, Result}; +use tokio::fs; +use xdg::BaseDirectories; + +#[derive(Debug)] +pub struct NixNetrc { + /// Path to write the modified netrc back to. + path: Option, + + /// Machines in the netrc file. + machines: HashMap, +} + +#[derive(Debug, PartialEq, Eq)] +struct Machine { + /// A password. 
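+    ///
+    /// For Attic servers, this holds the access token (see `add_token`).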
+ password: Option, + + /// Any other tokens that we must preserve. + /// + /// We output in pairs when reserializing. Curl allows the key + /// and value to be on different lines, but who knows about other + /// implementations? + other: Vec, +} + +impl NixNetrc { + pub async fn load() -> Result { + let nix_base = BaseDirectories::with_prefix("nix")?; + let path = nix_base.place_config_file("netrc")?; + + let machines = if path.exists() { + let content = fs::read_to_string(&path).await?; + parse_machines(&content)? + } else { + HashMap::new() + }; + + Ok(Self { + path: Some(path), + machines, + }) + } + + /// Returns the path to the netrc file. + pub fn path(&self) -> Option<&Path> { + self.path.as_deref() + } + + /// Saves the modified configuration file. + pub async fn save(&self) -> Result<()> { + if let Some(path) = &self.path { + let mut content = String::new(); + serialize_machines(&mut content, &self.machines)?; + fs::write(path, content).await?; + Ok(()) + } else { + Err(anyhow!("Don't know how to save the netrc")) + } + } + + /// Adds a token as a password. + pub fn add_token(&mut self, machine: String, token: String) { + if let Some(m) = self.machines.get_mut(&machine) { + m.password = Some(token); + } else { + self.machines.insert( + machine, + Machine { + password: Some(token), + other: Vec::new(), + }, + ); + } + } +} + +fn parse_machines(netrc: &str) -> Result> { + let mut machines = HashMap::new(); + let mut cur_machine = None; + + let mut cur; + let mut remaining = netrc; + while !remaining.is_empty() { + (cur, remaining) = get_next_token(remaining); + + match cur { + "" => { + break; + } + "default" => { + if let Some((name, machine)) = cur_machine { + machines.insert(name, machine); + } + + cur_machine = Some(( + "".to_string(), + Machine { + password: None, + other: Vec::new(), + }, + )); + } + "machine" => { + let (m_name, m_remaining) = get_next_token(remaining); + remaining = m_remaining; + + if let Some((name, machine)) = cur_machine { + machines.insert(name, machine); + } + + cur_machine = Some(( + m_name.to_string(), + Machine { + password: None, + other: Vec::new(), + }, + )); + } + "password" => { + let (m_password, m_remaining) = get_next_token(remaining); + remaining = m_remaining; + + if let Some((_, ref mut machine)) = &mut cur_machine { + machine.password = Some(m_password.to_string()); + } else { + return Err(anyhow!("Password field outside a machine block")); + } + } + tok => { + if let Some((_, ref mut machine)) = &mut cur_machine { + machine.other.push(tok.to_string()); + } else { + return Err(anyhow!("Unknown token {} outside a machine block", tok)); + } + } + } + } + + if let Some((name, machine)) = cur_machine { + machines.insert(name, machine); + } + + Ok(machines) +} + +fn serialize_machines(w: &mut impl fmt::Write, machines: &HashMap) -> Result<()> { + for (name, machine) in machines.iter() { + if name.is_empty() { + writeln!(w, "default")?; + } else { + writeln!(w, "machine {}", name)?; + } + + if let Some(password) = &machine.password { + writeln!(w, "password {}", password)?; + } + + for chunk in machine.other.chunks(2) { + writeln!(w, "{}", chunk.join(" "))?; + } + } + + Ok(()) +} + +fn get_next_token(s: &str) -> (&str, &str) { + let s = strip_leading_whitespace(s); + if let Some(idx) = s.find(|c| c == '\n' || c == ' ' || c == '\t') { + (&s[..idx], strip_leading_whitespace(&s[idx + 1..])) + } else { + (s, "") + } +} + +fn strip_leading_whitespace(s: &str) -> &str { + if let Some(idx) = s.find(|c| c != '\n' && c != ' ' && c != '\t') { + 
&s[idx..] + } else { + "" + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_netrc_strip() { + assert_eq!("", strip_leading_whitespace(" ")); + assert_eq!("a", strip_leading_whitespace(" a")); + assert_eq!("abc", strip_leading_whitespace(" \t\t\n\nabc")); + assert_eq!("abc", strip_leading_whitespace("abc")); + } + + #[test] + fn test_netrc_tokenization() { + assert_eq!(("", ""), get_next_token("")); + assert_eq!(("", ""), get_next_token(" ")); + assert_eq!(("", ""), get_next_token("\n")); + assert_eq!(("", ""), get_next_token("\t")); + + assert_eq!(("a", ""), get_next_token("a ")); + assert_eq!(("a", ""), get_next_token(" a")); + assert_eq!(("a", ""), get_next_token(" a ")); + + assert_eq!(("abc", ""), get_next_token("abc")); + + assert_eq!(("a", "b"), get_next_token("a b")); + assert_eq!(("a", "b c"), get_next_token("a b c")); + assert_eq!(("a", "b\nc"), get_next_token("a\nb\nc")); + assert_eq!(("a", "b\nc"), get_next_token("a\tb\nc")); + + assert_eq!(("a", "b c"), get_next_token("a b c")); + assert_eq!(("a", "b\nc"), get_next_token("a\n\n\nb\nc")); + assert_eq!(("a", "b\nc"), get_next_token("a\n\t\nb\nc")); + } + + #[test] + fn test_netrc_parse() { + let machines = parse_machines( + "default password hunter2 machine localhost login login password 114514", + ) + .unwrap(); + eprintln!("{:#?}", machines); + + assert_eq!(Some("114514".to_string()), machines["localhost"].password); + + let mut serialized = String::new(); + serialize_machines(&mut serialized, &machines).unwrap(); + eprintln!("{}", serialized); + + let reparse = parse_machines(&serialized).unwrap(); + assert_eq!(machines, reparse); + } +} diff --git a/client/src/version.rs b/client/src/version.rs new file mode 100644 index 0000000..4da5d2e --- /dev/null +++ b/client/src/version.rs @@ -0,0 +1,8 @@ +/// The distributor of this Attic client. +/// +/// Common values include `nixpkgs`, `attic` and `dev`. 
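+///
+/// This is baked in at build time via the `ATTIC_DISTRIBUTOR` environment
+/// variable. A sketch:
+///
+/// ```console
+/// $ ATTIC_DISTRIBUTOR=dev cargo build --release
+/// ```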
+pub const ATTIC_DISTRIBUTOR: &str = if let Some(distro) = option_env!("ATTIC_DISTRIBUTOR") { + distro +} else { + "unknown" +}; diff --git a/default.nix b/default.nix new file mode 100644 index 0000000..20e2b64 --- /dev/null +++ b/default.nix @@ -0,0 +1,8 @@ +let + flake = import ./flake-compat.nix; +in flake.defaultNix.default.overrideAttrs (_: { + passthru = { + attic-client = flake.defaultNix.outputs.packages.${builtins.currentSystem}.attic-client; + demo = flake.defaultNix.outputs.devShells.${builtins.currentSystem}.demo; + }; +}) diff --git a/flake-compat.nix b/flake-compat.nix new file mode 100644 index 0000000..96341ad --- /dev/null +++ b/flake-compat.nix @@ -0,0 +1,9 @@ +let + lock = builtins.fromJSON (builtins.readFile ./flake.lock); + flakeCompat = import (fetchTarball { + url = "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz"; + sha256 = lock.nodes.flake-compat.locked.narHash; + }); +in flakeCompat { + src = ./.; +} diff --git a/flake.lock b/flake.lock new file mode 100644 index 0000000..9e0e5a4 --- /dev/null +++ b/flake.lock @@ -0,0 +1,60 @@ +{ + "nodes": { + "flake-compat": { + "flake": false, + "locked": { + "lastModified": 1668681692, + "narHash": "sha256-Ht91NGdewz8IQLtWZ9LCeNXMSXHUss+9COoqu6JLmXU=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "009399224d5e398d03b22badca40a37ac85412a1", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1672428209, + "narHash": "sha256-eejhqkDz2cb2vc5VeaWphJz8UXNuoNoM8/Op8eWv2tQ=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "293a28df6d7ff3dec1e61e37cc4ee6e6c0fb0847", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "root": { + "inputs": { + "flake-compat": "flake-compat", + "nixpkgs": "nixpkgs", + "utils": "utils" + } + }, + "utils": { + "locked": { + "lastModified": 1667395993, + "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/flake.nix b/flake.nix new file mode 100644 index 0000000..1f31591 --- /dev/null +++ b/flake.nix @@ -0,0 +1,91 @@ +{ + description = "A Nix binary cache server"; + + inputs = { + nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; + utils.url = "github:numtide/flake-utils"; + + flake-compat = { + url = "github:edolstra/flake-compat"; + flake = false; + }; + }; + + outputs = { self, nixpkgs, utils, ... 
}: let + supportedSystems = utils.lib.defaultSystems; + in utils.lib.eachSystem supportedSystems (system: let + pkgs = import nixpkgs { inherit system; }; + + inherit (pkgs) lib; + in rec { + packages = { + default = packages.attic; + + attic = pkgs.callPackage ./package.nix { }; + attic-client = packages.attic.override { clientOnly = true; }; + + attic-server = let + attic-server = pkgs.callPackage ./package.nix { + crates = [ "attic-server" ]; + }; + in attic-server.overrideAttrs (old: { + pname = "attic-server"; + + CARGO_PROFILE_RELEASE_LTO = "fat"; + CARGO_PROFILE_RELEASE_CODEGEN_UNITS = "1"; + }); + + attic-server-image = pkgs.dockerTools.buildImage { + name = "attic-server"; + tag = "main"; + config = { + Entrypoint = [ "${packages.attic-server}/bin/atticd" ]; + Cmd = [ "--mode" "api-server" ]; + Env = [ + "SSL_CERT_FILE=${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt" + ]; + }; + }; + + book = pkgs.callPackage ./book { + attic = packages.attic; + }; + }; + + devShells = { + default = pkgs.mkShell { + inputsFrom = with packages; [ attic book ]; + nativeBuildInputs = with pkgs; [ + rustfmt clippy + cargo-expand cargo-outdated cargo-edit + + sqlite-interactive + + editorconfig-checker + + flyctl + ] ++ (lib.optionals pkgs.stdenv.isLinux [ + linuxPackages.perf + ]); + + NIX_PATH = "nixpkgs=${pkgs.path}"; + RUST_SRC_PATH = "${pkgs.rustPlatform.rustcSrc}/library"; + + ATTIC_DISTRIBUTOR = "dev"; + }; + + demo = pkgs.mkShell { + nativeBuildInputs = [ + packages.default + ]; + + shellHook = '' + >&2 echo + >&2 echo '🚀 Run `atticd` to get started!' + >&2 echo + ''; + }; + }; + devShell = devShells.default; + }); +} diff --git a/package.nix b/package.nix new file mode 100644 index 0000000..5ec7792 --- /dev/null +++ b/package.nix @@ -0,0 +1,63 @@ +{ lib, stdenv, rustPlatform +, pkg-config +, installShellFiles +, nix +, boost +, darwin + +# Only build the client +, clientOnly ? false + +# Only build certain crates +, crates ? 
if clientOnly then [ "attic-client" ] else [] +}: + +let + ignoredPaths = [ ".github" "target" "book" ]; + +in rustPlatform.buildRustPackage rec { + pname = "attic"; + version = "0.1.0"; + + src = lib.cleanSourceWith { + filter = name: type: !(type == "directory" && builtins.elem (baseNameOf name) ignoredPaths); + src = lib.cleanSource ./.; + }; + + nativeBuildInputs = [ + rustPlatform.bindgenHook + pkg-config + installShellFiles + ]; + + buildInputs = [ + nix boost + ] ++ lib.optionals stdenv.isDarwin (with darwin.apple_sdk.frameworks; [ + SystemConfiguration + ]); + + cargoHash = "sha256-9gJGY/6m6ao8srnhJ3WzDx35F56lhwZ6t0T3FSn/p7g="; + cargoBuildFlags = lib.concatMapStrings (c: "-p ${c} ") crates; + + ATTIC_DISTRIBUTOR = "attic"; + + # Recursive Nix is not stable yet + doCheck = false; + + postInstall = lib.optionalString (stdenv.hostPlatform == stdenv.buildPlatform) '' + if [[ -f $out/bin/attic ]]; then + installShellCompletion --cmd attic \ + --bash <($out/bin/attic gen-completions bash) \ + --zsh <($out/bin/attic gen-completions zsh) \ + --fish <($out/bin/attic gen-completions fish) + fi + ''; + + meta = with lib; { + description = "Multi-tenant Nix binary cache system"; + homepage = "https://github.com/zhaofengli/attic"; + license = licenses.agpl3Plus; + maintainers = with maintainers; [ zhaofengli ]; + platforms = platforms.linux ++ platforms.darwin; + }; +} diff --git a/server/Cargo.toml b/server/Cargo.toml new file mode 100644 index 0000000..8381058 --- /dev/null +++ b/server/Cargo.toml @@ -0,0 +1,96 @@ +[package] +name = "attic-server" +version = "0.1.0" +edition = "2021" +publish = false + +[lib] +name = "attic_server" +path = "src/lib.rs" + +[[bin]] +name = "atticd" +path = "src/main.rs" +doc = false + +[[bin]] +name = "atticadm" +path = "src/adm/main.rs" +doc = false + +[dependencies] +attic = { path = "../attic", default-features = false } + +anyhow = "1.0.68" +async-trait = "0.1.60" +aws-sdk-s3 = "0.22.0" +axum = "0.6.1" +axum-macros = "0.3.0" +base64 = "0.20.0" +bytes = "1.3.0" +chrono = "0.4.23" +clap = { version = "4.0", features = ["derive"] } +derivative = "2.2.0" +digest = "0.10.6" +displaydoc = "0.2.3" +enum-as-inner = "0.5.1" +futures = "0.3.25" +hex = "0.4.3" +humantime = "2.1.0" +humantime-serde = "1.1.1" +itoa = "1.0.5" +jsonwebtoken = "8.2.0" +lazy_static = "1.4.0" +maybe-owned = "0.3.4" +rand = "0.8.5" +regex = "1.7.0" +ryu = "1.0.12" +sha2 = { version = "0.10.6", features = ["asm"] } +serde = "1.0.151" +serde_json = "1.0.91" +serde_with = "2.1.0" +tokio-util = { version = "0.7.4", features = [ "io" ] } +toml = "0.5.10" +tower-http = { version = "0.3.5", features = [ "catch-panic" ] } +tracing = "0.1.37" +tracing-subscriber = "0.3.16" +uuid = { version = "1.2.2", features = ["v4"] } +console-subscriber = { version = "0.1.8", optional = true } +xdg = "2.4.1" + +[dependencies.async-compression] +version = "0.3.15" +features = [ + "tokio", + "xz", + "zstd", + "brotli", +] + +[dependencies.sea-orm] +version = "0.10.6" +features = [ + "runtime-tokio-rustls", + "macros", + "sqlx-postgres", + "sqlx-sqlite", + "debug-print", +] + +[dependencies.sea-orm-migration] +version = "0.10.6" + +[dependencies.tokio] +version = "1.23.0" +features = [ + "fs", + "io-util", + "macros", + "process", + "rt", + "rt-multi-thread", + "sync", +] + +[features] +tokio-console = ["dep:console-subscriber"] diff --git a/server/src/access/http.rs b/server/src/access/http.rs new file mode 100644 index 0000000..e91ccc0 --- /dev/null +++ b/server/src/access/http.rs @@ -0,0 +1,168 @@ +//! 
HTTP middlewares for access control. + +use std::str; + +use axum::{http::Request, middleware::Next, response::Response}; +use lazy_static::lazy_static; +use regex::Regex; +use sea_orm::DatabaseConnection; +use tokio::sync::OnceCell; + +use crate::access::{CachePermission, Token}; +use crate::database::{entity::cache::CacheModel, AtticDatabase}; +use crate::error::ServerResult; +use crate::{RequestState, State}; +use attic::cache::CacheName; + +lazy_static! { + static ref AUTHORIZATION_REGEX: Regex = + Regex::new(r"^(?i)((?Pbearer)|(?Pbasic))(?-i) (?P(.*))$").unwrap(); +} + +/// Auth state. +#[derive(Debug)] +pub struct AuthState { + /// The JWT token. + pub token: OnceCell, +} + +impl AuthState { + /// Returns an auth state with no authenticated user and no permissions. + pub fn new() -> Self { + Self { + token: OnceCell::new(), + } + } + + /// Finds and performs authorization for a cache. + pub async fn auth_cache( + &self, + database: &DatabaseConnection, + cache_name: &CacheName, + f: F, + ) -> ServerResult + where + F: FnOnce(CacheModel, &mut CachePermission) -> ServerResult, + { + let mut permission = if let Some(token) = self.token.get() { + token.get_permission_for_cache(cache_name) + } else { + CachePermission::default() + }; + + let cache = match database.find_cache(cache_name).await { + Ok(d) => { + if d.is_public { + permission.add_public_permissions(); + } + + d + } + Err(e) => { + if permission.can_discover() { + return Err(e); + } else { + return Err(e.into_no_discovery_permissions()); + } + } + }; + + match f(cache, &mut permission) { + Ok(t) => Ok(t), + Err(e) => { + if permission.can_discover() { + Err(e) + } else { + Err(e.into_no_discovery_permissions()) + } + } + } + } + + /// Returns permission granted for a cache. + pub fn get_permission_for_cache( + &self, + cache: &CacheName, + grant_public_permissions: bool, + ) -> CachePermission { + let mut permission = if let Some(token) = self.token.get() { + token.get_permission_for_cache(cache) + } else { + CachePermission::default() + }; + + if grant_public_permissions { + permission.add_public_permissions(); + } + + permission + } +} + +/// Performs auth. +pub async fn apply_auth(req: Request, next: Next) -> Response { + let token: Option = req + .headers() + .get("Authorization") + .and_then(|bytes| bytes.to_str().ok()) + .and_then(parse_authorization_header) + .and_then(|jwt| { + let state = req.extensions().get::().unwrap(); + let res_token = Token::from_jwt(&jwt, &state.config.token_hs256_secret.decoding); + if let Err(e) = &res_token { + tracing::debug!("Ignoring bad JWT token: {}", e); + } + res_token.ok() + }); + + if let Some(token) = token { + let req_state = req.extensions().get::().unwrap(); + req_state.auth.token.set(token).unwrap(); + tracing::trace!("Added valid token"); + } + + next.run(req).await +} + +/// Extracts the JWT from an Authorization header. 
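+///
+/// Both of the following forms yield a token. For Basic auth, the base64
+/// payload decodes to `user:pass` and only the password part is kept
+/// (see the tests below):
+///
+/// ```text
+/// Authorization: Bearer some-token
+/// Authorization: Basic c29tZXVzZXI6c29tZXBhc3M=
+/// ```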
+fn parse_authorization_header(authorization: &str) -> Option<String> { + let captures = AUTHORIZATION_REGEX.captures(authorization)?; + let rest = captures.name("rest").unwrap().as_str(); + + if captures.name("bearer").is_some() { + // Bearer token + Some(rest.to_string()) + } else { + // Basic auth + let bytes = base64::decode(rest).ok()?; + + let user_pass = str::from_utf8(&bytes).ok()?; + let colon = user_pass.find(':')?; + let pass = &user_pass[colon + 1..]; + + Some(pass.to_string()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_parse_authorization_header() { + assert_eq!( + "somepass", + parse_authorization_header("Basic c29tZXVzZXI6c29tZXBhc3M=").unwrap(), + ); + + assert_eq!( + "somepass", + parse_authorization_header("baSIC c29tZXVzZXI6c29tZXBhc3M=").unwrap(), + ); + + assert_eq!( + "some-token", + parse_authorization_header("bearer some-token").unwrap(), + ); + } +} diff --git a/server/src/access/mod.rs b/server/src/access/mod.rs new file mode 100644 index 0000000..b579b6e --- /dev/null +++ b/server/src/access/mod.rs @@ -0,0 +1,347 @@ +//! Access control. +//! +//! Access control in Attic is simple and stateless [0]: the server validates +//! the JWT against a trusted public key and allows access based on the +//! `x-attic-access` claim. +//! +//! One primary goal of the Attic Server is easy scalability. It's designed +//! to be deployed to serverless platforms like AWS Lambda and have fast +//! cold-start times. Instances are created and destroyed rapidly in response +//! to requests. +//! +//! [0] We may revisit this later :) +//! +//! ## Cache discovery +//! +//! If the JWT grants any permission at all to the requested cache name, +//! then the bearer is able to discover the presence of the cache, meaning +//! that NoSuchCache or Forbidden can be returned depending on the scenario. +//! Otherwise, the user will get a generic 401 response (Unauthorized) +//! regardless of the request (or whether the cache exists or not). +//! +//! ## Supplying the token +//! +//! The JWT can be supplied to the server in one of two ways: +//! +//! - As a normal Bearer token. +//! - As the password in Basic Auth (used by Nix). The username is ignored. +//! +//! To add the token to Nix, use the following format in `~/.config/nix/netrc`: +//! +//! ```text +//! machine attic.server.tld password eyJhb... +//! ``` +//! +//! ## Example token +//! +//! ```json +//! { +//! "sub": "meow", +//! "exp": 4102324986, +//! "https://jwt.attic.rs/v1": { +//! "caches": { +//! "cache-rw": { +//! "w": 1, +//! "r": 1 +//! }, +//! "cache-ro": { +//! "r": 1 +//! }, +//! "team-*": { +//! "w": 1, +//! "r": 1, +//! "cc": 1 +//! } +//! } +//! } +//! } +//! ``` + +pub mod http; + +#[cfg(test)] +mod tests; + +use std::collections::HashMap; + +use chrono::{DateTime, Utc}; +use displaydoc::Display; +pub use jsonwebtoken::{ + Algorithm as JwtAlgorithm, DecodingKey as JwtDecodingKey, EncodingKey as JwtEncodingKey, + Header as JwtHeader, Validation as JwtValidation, +}; +use serde::{Deserialize, Serialize}; +use serde_with::{serde_as, BoolFromInt}; + +use crate::error::ServerResult; +use attic::cache::{CacheName, CacheNamePattern}; + +/// Custom claim namespace for the AtticAccess information. +/// +/// Custom claim namespaces are required by platforms like Auth0, and +/// custom claims without one will be silently dropped. +/// +/// +/// +/// Also change the `#[serde(rename)]` below if you change this. +pub const CLAIM_NAMESPACE: &str = "https://jwt.attic.rs/v1"; + +macro_rules!
require_permission_function { + ($name:ident, $descr:literal, $member:ident) => { + pub fn $name(&self) -> ServerResult<()> { + if !self.$member { + tracing::debug!("Client has no {} permission", $descr); + if self.can_discover() { + Err(Error::PermissionDenied.into()) + } else { + Err(Error::NoDiscoveryPermission.into()) + } + } else { + Ok(()) + } + } + }; +} + +/// A validated JSON Web Token. +#[derive(Debug)] +pub struct Token(jsonwebtoken::TokenData<TokenClaims>); + +/// Claims of a JSON Web Token. +#[derive(Debug, Serialize, Deserialize)] +struct TokenClaims { + /// Subject. + sub: String, + + /// Expiration timestamp. + exp: usize, + + /// Attic namespace. + #[serde(rename = "https://jwt.attic.rs/v1")] + attic_ns: AtticAccess, +} + +/// Permissions granted to a client. +/// +/// This is the content of the `attic-access` claim in JWTs. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +pub struct AtticAccess { + /// Cache permissions. + /// + /// Keys here may include wildcards. + caches: HashMap<CacheNamePattern, CachePermission>, +} + +/// Permission to a single cache. +#[serde_as] +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct CachePermission { + /// Can pull objects from the cache. + #[serde(default = "CachePermission::permission_default")] + #[serde(skip_serializing_if = "is_false")] + #[serde(rename = "r")] + #[serde_as(as = "BoolFromInt")] + pub pull: bool, + + /// Can push objects to the cache. + #[serde(default = "CachePermission::permission_default")] + #[serde(skip_serializing_if = "is_false")] + #[serde(rename = "w")] + #[serde_as(as = "BoolFromInt")] + pub push: bool, + + /// Can delete objects from the cache. + #[serde(default = "CachePermission::permission_default")] + #[serde(skip_serializing_if = "is_false")] + #[serde(rename = "d")] + #[serde_as(as = "BoolFromInt")] + pub delete: bool, + + /// Can create the cache itself. + #[serde(default = "CachePermission::permission_default")] + #[serde(skip_serializing_if = "is_false")] + #[serde(rename = "cc")] + #[serde_as(as = "BoolFromInt")] + pub create_cache: bool, + + /// Can reconfigure the cache. + #[serde(default = "CachePermission::permission_default")] + #[serde(skip_serializing_if = "is_false")] + #[serde(rename = "cr")] + #[serde_as(as = "BoolFromInt")] + pub configure_cache: bool, + + /// Can configure retention/quota settings. + #[serde(default = "CachePermission::permission_default")] + #[serde(skip_serializing_if = "is_false")] + #[serde(rename = "cq")] + #[serde_as(as = "BoolFromInt")] + pub configure_cache_retention: bool, + + /// Can destroy the cache itself. + #[serde(default = "CachePermission::permission_default")] + #[serde(skip_serializing_if = "is_false")] + #[serde(rename = "cd")] + #[serde_as(as = "BoolFromInt")] + pub destroy_cache: bool, +} + +/// An access error. +#[derive(Debug, Display)] +#[ignore_extra_doc_attributes] +pub enum Error { + /// User has no permission to this cache. + NoDiscoveryPermission, + + /// User does not have permission to complete this action. + /// + /// This implies that there is some permission granted to the + /// user, so the user is authorized to discover the cache. + PermissionDenied, + + /// JWT error: {0} + TokenError(jsonwebtoken::errors::Error), +} + +impl Token { + /// Verifies and decodes a token. + pub fn from_jwt(token: &str, key: &JwtDecodingKey) -> ServerResult<Self> { + let validation = JwtValidation::default(); + jsonwebtoken::decode::<TokenClaims>(token, key, &validation) + .map_err(|e| Error::TokenError(e).into()) + .map(Token) + } + + /// Creates a new token with an expiration timestamp.
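+ /// + /// A minimal usage sketch (the subject and validity below are illustrative): + /// + /// ```text + /// let exp = Utc::now() + chrono::Duration::days(30); + /// let token = Token::new("alice".to_string(), &exp); + /// ```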
+ pub fn new(sub: String, exp: &DateTime<Utc>) -> Self { + let claims = TokenClaims { + sub, + exp: exp.timestamp() as usize, + attic_ns: Default::default(), + }; + + Self(jsonwebtoken::TokenData { + header: JwtHeader::new(JwtAlgorithm::HS256), + claims, + }) + } + + /// Encodes the token. + pub fn encode(&self, key: &JwtEncodingKey) -> ServerResult<String> { + jsonwebtoken::encode(&self.0.header, &self.0.claims, key) + .map_err(|e| Error::TokenError(e).into()) + } + + /// Returns the claims as a serializable value. + pub fn opaque_claims(&self) -> &impl Serialize { + &self.0.claims + } + + /// Returns a mutable reference to a permission entry. + pub fn get_or_insert_permission_mut( + &mut self, + pattern: CacheNamePattern, + ) -> &mut CachePermission { + use std::collections::hash_map::Entry; + + let access = self.attic_access_mut(); + match access.caches.entry(pattern) { + Entry::Occupied(v) => v.into_mut(), + Entry::Vacant(v) => v.insert(CachePermission::default()), + } + } + + /// Returns explicit permission granted for a cache. + pub fn get_permission_for_cache(&self, cache: &CacheName) -> CachePermission { + let access = self.attic_access(); + + let pattern_key = cache.to_pattern(); + if let Some(direct_match) = access.caches.get(&pattern_key) { + return direct_match.clone(); + } + + for (pattern, permission) in access.caches.iter() { + if pattern.matches(cache) { + return permission.clone(); + } + } + + CachePermission::default() + } + + fn attic_access(&self) -> &AtticAccess { + &self.0.claims.attic_ns + } + + fn attic_access_mut(&mut self) -> &mut AtticAccess { + &mut self.0.claims.attic_ns + } +} + +impl CachePermission { + /// Adds implicit grants for public caches. + pub fn add_public_permissions(&mut self) { + self.pull = true; + } + + /// Returns whether the user is allowed to discover this cache. + /// + /// This permission is implied when any permission is explicitly + /// granted. + pub const fn can_discover(&self) -> bool { + self.push + || self.pull + || self.delete + || self.create_cache + || self.configure_cache + || self.destroy_cache + || self.configure_cache_retention + } + + pub fn require_discover(&self) -> ServerResult<()> { + if !self.can_discover() { + Err(Error::NoDiscoveryPermission.into()) + } else { + Ok(()) + } + } + + require_permission_function!(require_pull, "pull", pull); + require_permission_function!(require_push, "push", push); + require_permission_function!(require_delete, "delete", delete); + require_permission_function!(require_create_cache, "create cache", create_cache); + require_permission_function!( + require_configure_cache, + "reconfigure cache", + configure_cache + ); + require_permission_function!( + require_configure_cache_retention, + "configure cache retention", + configure_cache_retention + ); + require_permission_function!(require_destroy_cache, "destroy cache", destroy_cache); + + fn permission_default() -> bool { + false + } +} + +impl Default for CachePermission { + fn default() -> Self { + Self { + pull: false, + push: false, + delete: false, + create_cache: false, + configure_cache: false, + configure_cache_retention: false, + destroy_cache: false, + } + } +} + +// Helper for the `skip_serializing_if` attributes above. +fn is_false(b: &bool) -> bool { + !b +} diff --git a/server/src/access/tests.rs b/server/src/access/tests.rs new file mode 100644 index 0000000..fc4b9c5 --- /dev/null +++ b/server/src/access/tests.rs @@ -0,0 +1,76 @@ +use super::*; + +use attic::cache::CacheName; + +macro_rules!
cache { + ($n:expr) => { + CacheName::new($n.to_string()).unwrap() + }; +} + +#[test] +fn test_basic() { + // "very secure secret" + let base64_secret = "dmVyeSBzZWN1cmUgc2VjcmV0"; + + let dec_key = + JwtDecodingKey::from_base64_secret(base64_secret).expect("Could not import decoding key"); + + /* + { + "sub": "meow", + "exp": 4102324986, + "https://jwt.attic.rs/v1": { + "caches": { + "cache-rw": {"r":1,"w":1}, + "cache-ro": {"r":1}, + "team-*": {"r":1,"w":1,"cc":1} + } + } + } + */ + + let token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJtZW93IiwiZXhwIjo0MTAyMzI0OTg2LCJodHRwczovL2p3dC5hdHRpYy5ycy92MSI6eyJjYWNoZXMiOnsiY2FjaGUtcnciOnsiciI6MSwidyI6MX0sImNhY2hlLXJvIjp7InIiOjF9LCJ0ZWFtLSoiOnsiciI6MSwidyI6MSwiY2MiOjF9fX19.UlsIM9bQHr9SXGAcSQcoVPo9No8Zhh6Y5xfX8vCmKmA"; + + let decoded = Token::from_jwt(token, &dec_key).unwrap(); + + let perm_rw = decoded.get_permission_for_cache(&cache! { "cache-rw" }); + + assert!(perm_rw.pull); + assert!(perm_rw.push); + assert!(!perm_rw.delete); + assert!(!perm_rw.create_cache); + + assert!(perm_rw.require_pull().is_ok()); + assert!(perm_rw.require_push().is_ok()); + assert!(perm_rw.require_delete().is_err()); + assert!(perm_rw.require_create_cache().is_err()); + + let perm_ro = decoded.get_permission_for_cache(&cache! { "cache-ro" }); + + assert!(perm_ro.pull); + assert!(!perm_ro.push); + assert!(!perm_ro.delete); + assert!(!perm_ro.create_cache); + + assert!(perm_ro.require_pull().is_ok()); + assert!(perm_ro.require_push().is_err()); + assert!(perm_ro.require_delete().is_err()); + assert!(perm_ro.require_create_cache().is_err()); + + let perm_team = decoded.get_permission_for_cache(&cache! { "team-xyz" }); + + assert!(perm_team.pull); + assert!(perm_team.push); + assert!(!perm_team.delete); + assert!(perm_team.create_cache); + + assert!(perm_team.require_pull().is_ok()); + assert!(perm_team.require_push().is_ok()); + assert!(perm_team.require_delete().is_err()); + assert!(perm_team.require_create_cache().is_ok()); + + assert!(!decoded + .get_permission_for_cache(&cache! { "forbidden-cache" }) + .can_discover()); +} diff --git a/server/src/adm/command/make_token.rs b/server/src/adm/command/make_token.rs new file mode 100644 index 0000000..3e00386 --- /dev/null +++ b/server/src/adm/command/make_token.rs @@ -0,0 +1,123 @@ +use anyhow::{anyhow, Result}; +use chrono::{Duration as ChronoDuration, Utc}; +use clap::Parser; +use humantime::Duration; + +use crate::Opts; +use attic::cache::CacheNamePattern; +use attic_server::access::Token; +use attic_server::config::Config; + +/// Generate a new token. +/// +/// For example, to generate a token for Alice with read-write access +/// to any cache starting with `dev-` and read-only access to `prod`, +/// expiring in 2 years: +/// +/// $ atticadm make-token --sub "alice" --validity "2y" --pull "dev-*" --push "dev-*" --pull "prod" +#[derive(Debug, Parser)] +pub struct MakeToken { + /// The subject of the JWT token. + #[clap(long)] + sub: String, + + /// The validity period of the JWT token. + /// + /// You can use expressions like "2 years", "3 months" + /// and "1y". + #[clap(long)] + validity: Duration, + + /// Dump the claims without signing and encoding it. + #[clap(long)] + dump_claims: bool, + + /// A cache that the token may pull from. + /// + /// The value may contain wildcards. Specify this flag multiple + /// times to allow multiple patterns. + #[clap(long = "pull", value_name = "PATTERN")] + pull_patterns: Vec, + + /// A cache that the token may push to. + /// + /// The value may contain wildcards. 
Specify this flag multiple + /// times to allow multiple patterns. + #[clap(long = "push", value_name = "PATTERN")] + push_patterns: Vec, + + /// A cache that the token may delete store paths from. + /// + /// The value may contain wildcards. Specify this flag multiple + /// times to allow multiple patterns. + #[clap(long = "delete", value_name = "PATTERN")] + delete_patterns: Vec, + + /// A cache that the token may create. + /// + /// The value may contain wildcards. Specify this flag multiple + /// times to allow multiple patterns. + #[clap(long = "create-cache", value_name = "PATTERN")] + create_cache_patterns: Vec, + + /// A cache that the token may configure. + /// + /// The value may contain wildcards. Specify this flag multiple + /// times to allow multiple patterns. + #[clap(long = "configure-cache", value_name = "PATTERN")] + configure_cache_patterns: Vec, + + /// A cache that the token may configure retention/quota for. + /// + /// The value may contain wildcards. Specify this flag multiple + /// times to allow multiple patterns. + #[clap(long = "configure-cache-retention", value_name = "PATTERN")] + configure_cache_retention_patterns: Vec, + + /// A cache that the token may destroy. + /// + /// The value may contain wildcards. Specify this flag multiple + /// times to allow multiple patterns. + #[clap(long = "destroy-cache", value_name = "PATTERN")] + destroy_cache_patterns: Vec, +} + +macro_rules! grant_permissions { + ($token:ident, $list:expr, $perm:ident) => { + for pattern in $list { + let mut perm = $token.get_or_insert_permission_mut(pattern.to_owned()); + perm.$perm = true; + } + }; +} + +pub async fn run(config: Config, opts: Opts) -> Result<()> { + let sub = opts.command.as_make_token().unwrap(); + let duration = ChronoDuration::from_std(sub.validity.into())?; + let exp = Utc::now() + .checked_add_signed(duration) + .ok_or_else(|| anyhow!("Expiry timestamp overflowed"))?; + + let mut token = Token::new(sub.sub.to_owned(), &exp); + + grant_permissions!(token, &sub.pull_patterns, pull); + grant_permissions!(token, &sub.push_patterns, push); + grant_permissions!(token, &sub.delete_patterns, delete); + grant_permissions!(token, &sub.create_cache_patterns, create_cache); + grant_permissions!(token, &sub.configure_cache_patterns, configure_cache); + grant_permissions!( + token, + &sub.configure_cache_retention_patterns, + configure_cache_retention + ); + grant_permissions!(token, &sub.destroy_cache_patterns, destroy_cache); + + if sub.dump_claims { + println!("{}", serde_json::to_string(token.opaque_claims())?); + } else { + let encoded_token = token.encode(&config.token_hs256_secret.encoding)?; + println!("{}", encoded_token); + } + + Ok(()) +} diff --git a/server/src/adm/command/mod.rs b/server/src/adm/command/mod.rs new file mode 100644 index 0000000..6f9403b --- /dev/null +++ b/server/src/adm/command/mod.rs @@ -0,0 +1 @@ +pub mod make_token; diff --git a/server/src/adm/main.rs b/server/src/adm/main.rs new file mode 100644 index 0000000..e104a63 --- /dev/null +++ b/server/src/adm/main.rs @@ -0,0 +1,48 @@ +mod command; + +use std::env; +use std::path::PathBuf; + +use anyhow::Result; +use clap::{Parser, Subcommand}; +use enum_as_inner::EnumAsInner; + +use attic_server::config; +use command::make_token::{self, MakeToken}; + +/// Attic server administration utilities. +#[derive(Debug, Parser)] +#[clap(version, author = "Zhaofeng Li ")] +#[clap(propagate_version = true)] +pub struct Opts { + /// Path to the config file. 
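+ /// + /// If omitted, the configuration is read from the `ATTIC_SERVER_CONFIG` environment variable (as inline TOML) and otherwise from the XDG config path, as implemented in `main` below.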
+ #[clap(short = 'f', long)] + config: Option, + + /// The sub-command. + #[clap(subcommand)] + pub command: Command, +} + +#[derive(Debug, Subcommand, EnumAsInner)] +pub enum Command { + MakeToken(MakeToken), +} + +#[tokio::main] +async fn main() -> Result<()> { + let opts = Opts::parse(); + let config = if let Some(config_path) = &opts.config { + config::load_config_from_path(config_path) + } else if let Ok(config_env) = env::var("ATTIC_SERVER_CONFIG") { + config::load_config_from_str(&config_env) + } else { + config::load_config_from_path(&config::get_xdg_config_path()?) + }; + + match opts.command { + Command::MakeToken(_) => make_token::run(config, opts).await?, + } + + Ok(()) +} diff --git a/server/src/api/binary_cache.rs b/server/src/api/binary_cache.rs new file mode 100644 index 0000000..dca7823 --- /dev/null +++ b/server/src/api/binary_cache.rs @@ -0,0 +1,211 @@ +//! Nix Binary Cache server. +//! +//! This module implements the Nix Binary Cache API. +//! +//! The implementation is based on the specifications at . + +use std::path::PathBuf; + +use axum::{ + body::StreamBody, + extract::{Extension, Path}, + http::StatusCode, + response::{IntoResponse, Redirect, Response}, + routing::get, + Router, +}; +use serde::Serialize; +use tokio_util::io::ReaderStream; +use tracing::instrument; + +use crate::database::AtticDatabase; +use crate::error::{ServerError, ServerResult}; +use crate::narinfo::NarInfo; +use crate::nix_manifest; +use crate::storage::Download; +use crate::{RequestState, State}; +use attic::cache::CacheName; +use attic::mime; +use attic::nix_store::StorePathHash; + +/// Nix cache information. +/// +/// An example of a correct response is as follows: +/// +/// ```text +/// StoreDir: /nix/store +/// WantMassQuery: 1 +/// Priority: 40 +/// ``` +#[derive(Debug, Clone, Serialize)] +struct NixCacheInfo { + /// Whether this binary cache supports bulk queries. + #[serde(rename = "WantMassQuery")] + want_mass_query: bool, + + /// The Nix store path this binary cache uses. + #[serde(rename = "StoreDir")] + store_dir: PathBuf, + + /// The priority of the binary cache. + /// + /// A lower number denotes a higher priority. + /// has a priority of 40. + #[serde(rename = "Priority")] + priority: i32, +} + +impl IntoResponse for NixCacheInfo { + fn into_response(self) -> Response { + match nix_manifest::to_string(&self) { + Ok(body) => Response::builder() + .status(StatusCode::OK) + .header("Content-Type", mime::NIX_CACHE_INFO) + .body(body) + .unwrap() + .into_response(), + Err(e) => e.into_response(), + } + } +} + +/// Gets information on a cache. +#[instrument(skip_all, fields(cache_name))] +async fn get_nix_cache_info( + Extension(state): Extension, + Extension(req_state): Extension, + Path(cache_name): Path, +) -> ServerResult { + let database = state.database().await?; + let cache = req_state + .auth + .auth_cache(database, &cache_name, |cache, permission| { + permission.require_pull()?; + Ok(cache) + }) + .await?; + + let info = NixCacheInfo { + want_mass_query: true, + store_dir: cache.store_dir.into(), + priority: cache.priority, + }; + + Ok(info) +} + +/// Gets various information on a store path hash. 
+/// +/// `/:cache/:path`, which may be one of +/// - GET `/:cache/{storePathHash}.narinfo` +/// - HEAD `/:cache/{storePathHash}.narinfo` +/// - GET `/:cache/{storePathHash}.ls` (not implemented) +#[instrument(skip_all, fields(cache_name, path))] +async fn get_store_path_info( + Extension(state): Extension, + Extension(req_state): Extension, + Path((cache_name, path)): Path<(CacheName, String)>, +) -> ServerResult { + let components: Vec<&str> = path.splitn(2, '.').collect(); + + if components.len() != 2 { + return Err(ServerError::NotFound); + } + + // TODO: Other endpoints + if components[1] != "narinfo" { + return Err(ServerError::NotFound); + } + + let store_path_hash = StorePathHash::new(components[0].to_string())?; + + tracing::debug!( + "Received request for {}.narinfo in {:?}", + store_path_hash.as_str(), + cache_name + ); + + let (object, cache, nar) = state + .database() + .await? + .find_object_by_store_path_hash(&cache_name, &store_path_hash) + .await?; + + let permission = req_state + .auth + .get_permission_for_cache(&cache_name, cache.is_public); + permission.require_pull()?; + + let mut narinfo = object.to_nar_info(&nar)?; + + if narinfo.signature().is_none() { + let keypair = cache.keypair()?; + narinfo.sign(&keypair); + } + + Ok(narinfo) +} + +/// Gets a NAR. +/// +/// - GET `:cache/nar/{storePathHash}.nar` +/// +/// Here we use the store path hash not the NAR hash or file hash +/// for better logging. In reality, the files are deduplicated by +/// content-addressing. +#[instrument(skip_all, fields(cache_name, path))] +async fn get_nar( + Extension(state): Extension, + Extension(req_state): Extension, + Path((cache_name, path)): Path<(CacheName, String)>, +) -> ServerResult { + let components: Vec<&str> = path.splitn(2, '.').collect(); + + if components.len() != 2 { + return Err(ServerError::NotFound); + } + + if components[1] != "nar" { + return Err(ServerError::NotFound); + } + + let store_path_hash = StorePathHash::new(components[0].to_string())?; + + tracing::debug!( + "Received request for {}.nar in {:?}", + store_path_hash.as_str(), + cache_name + ); + + let database = state.database().await?; + + let (object, cache, nar) = database + .find_object_by_store_path_hash(&cache_name, &store_path_hash) + .await?; + + let permission = req_state + .auth + .get_permission_for_cache(&cache_name, cache.is_public); + permission.require_pull()?; + + database.bump_object_last_accessed(object.id).await?; + + let remote_file = nar.remote_file.0; + let backend = state.storage().await?; + match backend.download_file_db(&remote_file).await? { + Download::Redirect(uri) => Ok(Redirect::temporary(&uri).into_response()), + Download::Stream(stream) => { + let stream = ReaderStream::new(stream); + let body = StreamBody::new(stream); + + Ok(body.into_response()) + } + } +} + +pub fn get_router() -> Router { + Router::new() + .route("/:cache/nix-cache-info", get(get_nix_cache_info)) + .route("/:cache/:path", get(get_store_path_info)) + .route("/:cache/nar/:path", get(get_nar)) +} diff --git a/server/src/api/mod.rs b/server/src/api/mod.rs new file mode 100644 index 0000000..309d358 --- /dev/null +++ b/server/src/api/mod.rs @@ -0,0 +1,17 @@ +//! HTTP API. 
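+//! +//! The router below merges the Nix Binary Cache endpoints (`binary_cache`) with the versioned `/_api/v1` endpoints, behind a placeholder landing page.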
+ +mod binary_cache; +mod v1; + +use axum::{response::Html, routing::get, Router}; + +async fn placeholder() -> Html<&'static str> { + Html(include_str!("placeholder.html")) +} + +pub(crate) fn get_router() -> Router { + Router::new() + .route("/", get(placeholder)) + .merge(binary_cache::get_router()) + .merge(v1::get_router()) +} diff --git a/server/src/api/placeholder.html b/server/src/api/placeholder.html new file mode 100644 index 0000000..23bdb5c --- /dev/null +++ b/server/src/api/placeholder.html @@ -0,0 +1,37 @@ + + + + Attic Binary Cache + + + +
+┏━━━━━━━━━━━━━━━━┑
+┃┏━━━ @ ━━━ @ ━━┓┃
+┃┃              ┃┃
+┃┃$ attic push 
┃┃ +┃┃ ┃┃ +┃┗━━━ ╰─────╯ ━━┛┃ +┗━━━━━━━━━━━━━━━━┛ + ╲ ############### ╲ + ╲ ############### ╲ + ╲ ############### ╲ + ━━━━━━━━━━━━━━━━━━ +
+ + diff --git a/server/src/api/v1/cache_config.rs b/server/src/api/v1/cache_config.rs new file mode 100644 index 0000000..8fd1569 --- /dev/null +++ b/server/src/api/v1/cache_config.rs @@ -0,0 +1,231 @@ +//! Cache configuration endpoint. + +use anyhow::anyhow; +use axum::extract::{Extension, Json, Path}; +use chrono::Utc; +use sea_orm::sea_query::{Expr, OnConflict}; +use sea_orm::ActiveValue::Set; +use sea_orm::{ColumnTrait, EntityTrait, QueryFilter}; +use tracing::instrument; + +use crate::database::entity::cache::{self, Entity as Cache}; +use crate::database::entity::Json as DbJson; +use crate::error::{ServerError, ServerResult}; +use crate::{RequestState, State}; +use attic::api::v1::cache_config::{ + CacheConfig, CreateCacheRequest, KeypairConfig, RetentionPeriodConfig, +}; +use attic::cache::CacheName; +use attic::signing::NixKeypair; + +#[instrument(skip_all, fields(cache_name))] +pub(crate) async fn get_cache_config( + Extension(state): Extension, + Extension(req_state): Extension, + Path(cache_name): Path, +) -> ServerResult> { + let database = state.database().await?; + let cache = req_state + .auth + .auth_cache(database, &cache_name, |cache, permission| { + permission.require_pull()?; + Ok(cache) + }) + .await?; + + let public_key = cache.keypair()?.export_public_key(); + + let retention_period_config = if let Some(period) = cache.retention_period { + RetentionPeriodConfig::Period(period as u32) + } else { + RetentionPeriodConfig::Global + }; + + Ok(Json(CacheConfig { + substituter_endpoint: Some(req_state.substituter_endpoint(cache_name)?), + api_endpoint: Some(req_state.api_endpoint()?), + keypair: None, + public_key: Some(public_key), + is_public: Some(cache.is_public), + store_dir: Some(cache.store_dir), + priority: Some(cache.priority), + upstream_cache_key_names: Some(cache.upstream_cache_key_names.0), + retention_period: Some(retention_period_config), + })) +} + +#[instrument(skip_all, fields(cache_name, payload))] +pub(crate) async fn configure_cache( + Extension(state): Extension, + Extension(req_state): Extension, + Path(cache_name): Path, + Json(payload): Json, +) -> ServerResult<()> { + let database = state.database().await?; + let (cache, permission) = req_state + .auth + .auth_cache(database, &cache_name, |cache, permission| { + permission.require_configure_cache()?; + Ok((cache, permission.clone())) + }) + .await?; + + let mut update = cache::ActiveModel { + id: Set(cache.id), + ..Default::default() + }; + + let mut modified = false; + + if let Some(keypair_cfg) = payload.keypair { + let keypair = match keypair_cfg { + KeypairConfig::Generate => NixKeypair::generate(cache_name.as_str())?, + KeypairConfig::Keypair(k) => k, + }; + update.keypair = Set(keypair.export_keypair()); + modified = true; + } + + if let Some(is_public) = payload.is_public { + update.is_public = Set(is_public); + modified = true; + } + + if let Some(store_dir) = payload.store_dir { + update.store_dir = Set(store_dir); + modified = true; + } + + if let Some(priority) = payload.priority { + update.priority = Set(priority); + modified = true; + } + + if let Some(upstream_cache_key_names) = payload.upstream_cache_key_names { + update.upstream_cache_key_names = Set(DbJson(upstream_cache_key_names)); + modified = true; + } + + if let Some(retention_period_config) = payload.retention_period { + permission.require_configure_cache_retention()?; + + match retention_period_config { + RetentionPeriodConfig::Global => { + update.retention_period = Set(None); + } + RetentionPeriodConfig::Period(period) => 
{ + update.retention_period = Set(Some(period.try_into().map_err(|_| { + ServerError::RequestError(anyhow!("Invalid retention period")) + })?)); + } + } + + modified = true; + } + + if modified { + Cache::update(update) + .exec(database) + .await + .map_err(ServerError::database_error)?; + + Ok(()) + } else { + Err(ServerError::RequestError(anyhow!( + "No modifiable fields were set." + ))) + } +} + +#[instrument(skip_all, fields(cache_name))] +pub(crate) async fn destroy_cache( + Extension(state): Extension, + Extension(req_state): Extension, + Path(cache_name): Path, +) -> ServerResult<()> { + let database = state.database().await?; + let cache = req_state + .auth + .auth_cache(database, &cache_name, |cache, permission| { + permission.require_destroy_cache()?; + Ok(cache) + }) + .await?; + + if state.config.soft_delete_caches { + // Perform soft deletion + let deletion = Cache::update_many() + .col_expr(cache::Column::DeletedAt, Expr::value(Some(Utc::now()))) + .filter(cache::Column::Id.eq(cache.id)) + .filter(cache::Column::DeletedAt.is_null()) + .exec(database) + .await + .map_err(ServerError::database_error)?; + + if deletion.rows_affected == 0 { + // Someone raced to (soft) delete the cache before us + Err(ServerError::NoSuchCache) + } else { + Ok(()) + } + } else { + // Perform hard deletion + let deletion = Cache::delete_many() + .filter(cache::Column::Id.eq(cache.id)) + .filter(cache::Column::DeletedAt.is_null()) // don't operate on soft-deleted caches + .exec(database) + .await + .map_err(ServerError::database_error)?; + + if deletion.rows_affected == 0 { + // Someone raced to (soft) delete the cache before us + Err(ServerError::NoSuchCache) + } else { + Ok(()) + } + } +} + +#[instrument(skip_all, fields(cache_name, payload))] +pub(crate) async fn create_cache( + Extension(state): Extension, + Extension(req_state): Extension, + Path(cache_name): Path, + Json(payload): Json, +) -> ServerResult<()> { + let permission = req_state.auth.get_permission_for_cache(&cache_name, false); + permission.require_create_cache()?; + + let database = state.database().await?; + + let keypair = match payload.keypair { + KeypairConfig::Generate => NixKeypair::generate(cache_name.as_str())?, + KeypairConfig::Keypair(k) => k, + }; + + let num_inserted = Cache::insert(cache::ActiveModel { + name: Set(cache_name.to_string()), + keypair: Set(keypair.export_keypair()), + is_public: Set(payload.is_public), + store_dir: Set(payload.store_dir), + priority: Set(payload.priority), + upstream_cache_key_names: Set(DbJson(payload.upstream_cache_key_names)), + created_at: Set(Utc::now()), + ..Default::default() + }) + .on_conflict( + OnConflict::column(cache::Column::Name) + .do_nothing() + .to_owned(), + ) + .exec_without_returning(database) + .await + .map_err(ServerError::database_error)?; + + if num_inserted == 0 { + // The cache already exists + Err(ServerError::CacheAlreadyExists) + } else { + Ok(()) + } +} diff --git a/server/src/api/v1/get_missing_paths.rs b/server/src/api/v1/get_missing_paths.rs new file mode 100644 index 0000000..ce8e427 --- /dev/null +++ b/server/src/api/v1/get_missing_paths.rs @@ -0,0 +1,69 @@ +use std::collections::HashSet; + +use axum::extract::{Extension, Json}; +use sea_orm::entity::prelude::*; +use sea_orm::{FromQueryResult, QuerySelect}; +use tracing::instrument; + +use crate::database::entity::cache; +use crate::database::entity::object::{self, Entity as Object}; +use crate::error::{ServerError, ServerResult}; +use crate::{RequestState, State}; +use 
attic::api::v1::get_missing_paths::{GetMissingPathsRequest, GetMissingPathsResponse}; +use attic::nix_store::StorePathHash; + +#[derive(FromQueryResult)] +struct StorePathHashOnly { + store_path_hash: String, +} + +/// Gets information on missing paths in a cache. +/// +/// Requires "push" permission as it essentially allows probing +/// of cache contents. +#[instrument(skip_all, fields(payload))] +pub(crate) async fn get_missing_paths( + Extension(state): Extension, + Extension(req_state): Extension, + Json(payload): Json, +) -> ServerResult> { + let database = state.database().await?; + req_state + .auth + .auth_cache(database, &payload.cache, |_, permission| { + permission.require_push()?; + Ok(()) + }) + .await?; + + let requested_hashes: HashSet = payload + .store_path_hashes + .iter() + .map(|h| h.as_str().to_owned()) + .collect(); + + let query_in = requested_hashes.iter().map(|h| Value::from(h.to_owned())); + + let result: Vec = Object::find() + .select_only() + .column_as(object::Column::StorePathHash, "store_path_hash") + .join(sea_orm::JoinType::InnerJoin, object::Relation::Cache.def()) + .filter(cache::Column::Name.eq(payload.cache.as_str())) + .filter(object::Column::StorePathHash.is_in(query_in)) + .into_model::() + .all(database) + .await + .map_err(ServerError::database_error)?; + + let found_hashes: HashSet = result.into_iter().map(|row| row.store_path_hash).collect(); + + // Safety: All requested_hashes are validated `StorePathHash`es. + // No need to pay the cost of checking again + #[allow(unsafe_code)] + let missing_paths = requested_hashes + .difference(&found_hashes) + .map(|h| unsafe { StorePathHash::new_unchecked(h.to_string()) }) + .collect(); + + Ok(Json(GetMissingPathsResponse { missing_paths })) +} diff --git a/server/src/api/v1/mod.rs b/server/src/api/v1/mod.rs new file mode 100644 index 0000000..bc34fd7 --- /dev/null +++ b/server/src/api/v1/mod.rs @@ -0,0 +1,37 @@ +mod cache_config; +mod get_missing_paths; +mod upload_path; + +use axum::{ + routing::{delete, get, patch, post, put}, + Router, +}; + +pub(crate) fn get_router() -> Router { + Router::new() + .route( + "/_api/v1/get-missing-paths", + post(get_missing_paths::get_missing_paths), + ) + .route("/_api/v1/upload-path", put(upload_path::upload_path)) + .route( + "/:cache/attic-cache-info", + get(cache_config::get_cache_config), + ) + .route( + "/_api/v1/cache-config/:cache", + get(cache_config::get_cache_config), + ) + .route( + "/_api/v1/cache-config/:cache", + post(cache_config::create_cache), + ) + .route( + "/_api/v1/cache-config/:cache", + patch(cache_config::configure_cache), + ) + .route( + "/_api/v1/cache-config/:cache", + delete(cache_config::destroy_cache), + ) +} diff --git a/server/src/api/v1/upload_path.rs b/server/src/api/v1/upload_path.rs new file mode 100644 index 0000000..06ebb37 --- /dev/null +++ b/server/src/api/v1/upload_path.rs @@ -0,0 +1,380 @@ +use std::io; + +use std::marker::Unpin; +use std::sync::Arc; + +use anyhow::anyhow; +use async_compression::tokio::bufread::{BrotliEncoder, XzEncoder, ZstdEncoder}; +use axum::{ + extract::{BodyStream, Extension}, + http::HeaderMap, +}; +use chrono::Utc; +use digest::Output as DigestOutput; +use futures::StreamExt; +use sea_orm::entity::prelude::*; +use sea_orm::ActiveValue::Set; +use sea_orm::TransactionTrait; +use sha2::{Digest, Sha256}; +use tokio::io::{AsyncRead, BufReader}; +use tokio::sync::OnceCell; +use tokio_util::io::StreamReader; +use tracing::instrument; +use uuid::Uuid; + +use crate::config::CompressionType; +use 
crate::error::{ServerError, ServerResult}; +use crate::narinfo::Compression; +use crate::{RequestState, State}; +use attic::api::v1::upload_path::UploadPathNarInfo; +use attic::hash::Hash; +use attic::stream::StreamHasher; +use attic::util::Finally; + +use crate::database::entity::cache; +use crate::database::entity::nar::{self, Entity as Nar, NarState}; +use crate::database::entity::object::{self, Entity as Object}; +use crate::database::entity::Json; +use crate::database::{AtticDatabase, NarGuard}; + +type CompressorFn<C> = Box<dyn FnOnce(C) -> Box<dyn AsyncRead + Unpin + Send> + Send>; + +/// Applies compression to a stream, computing hashes along the way. +/// +/// Our strategy is to stream into a temporary file on S3, performing compression +/// and computing the hashes along the way. We delete the temporary file on S3 +/// if the hashes do not match. +/// +/// ```text +/// ┌───────────────────────────────────►NAR Hash +/// │ +/// │ +/// ├───────────────────────────────────►NAR Size +/// │ +/// ┌─────┴────┐ ┌──────────┐ ┌───────────┐ +/// NAR Stream──►│NAR Hasher├─►│Compressor├─►│File Hasher├─►File Stream +/// └──────────┘ └──────────┘ └─────┬─────┘ +/// │ +/// ├───────►File Hash +/// │ +/// │ +/// └───────►File Size +/// ``` +struct CompressionStream { + stream: Box<dyn AsyncRead + Unpin>, + nar_compute: Arc<OnceCell<(DigestOutput<Sha256>, usize)>>, + file_compute: Arc<OnceCell<(DigestOutput<Sha256>, usize)>>, +} + +trait UploadPathNarInfoExt { + fn to_active_model(&self) -> object::ActiveModel; +} + +/// Uploads a new object to the cache. +/// +/// When clients request to upload an object, we first try to increment +/// the `holders_count` of one `nar` row with same NAR hash. If rows were +/// updated, it means the NAR exists in the global cache and we can deduplicate +/// after confirming the NAR hash ("Deduplicate" case). Otherwise, we perform +/// a new upload to S3 ("New NAR" case). +#[instrument(skip_all)] +#[axum_macros::debug_handler] +pub(crate) async fn upload_path( + Extension(state): Extension<State>, + Extension(req_state): Extension<RequestState>, + headers: HeaderMap, + stream: BodyStream, +) -> ServerResult<String> { + let upload_info: UploadPathNarInfo = { + let header = headers + .get("X-Attic-Nar-Info") + .ok_or_else(|| ServerError::RequestError(anyhow!("X-Attic-Nar-Info must be set")))?; + + serde_json::from_slice(header.as_bytes()).map_err(ServerError::request_error)? + }; + let cache_name = &upload_info.cache; + + let database = state.database().await?; + let cache = req_state + .auth + .auth_cache(database, cache_name, |cache, permission| { + permission.require_push()?; + Ok(cache) + }) + .await?; + + let stream = StreamReader::new( + stream.map(|r| r.map_err(|e| io::Error::new(io::ErrorKind::Other, e.to_string()))), + ); + + // Try to acquire a lock on an existing NAR + let existing_nar = database.find_and_lock_nar(&upload_info.nar_hash).await?; + match existing_nar { + Some(existing_nar) => { + // Deduplicate + upload_path_dedup(cache, upload_info, stream, existing_nar, database).await + } + None => { + // New NAR + upload_path_new(cache, upload_info, stream, database, &state).await + } + } +} + +/// Uploads a path when there is already a matching NAR in the global cache.
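+/// +/// The request body is still streamed to completion so the NAR hash and size can be confirmed against the locked `nar` row, but nothing new is written to storage; only an `object` row pointing at the existing NAR is created.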
+async fn upload_path_dedup( + cache: cache::Model, + upload_info: UploadPathNarInfo, + stream: impl AsyncRead + Unpin, + existing_nar: NarGuard, + database: &DatabaseConnection, +) -> ServerResult<String> { + let (mut stream, nar_compute) = StreamHasher::new(stream, Sha256::new()); + tokio::io::copy(&mut stream, &mut tokio::io::sink()) + .await + .map_err(ServerError::request_error)?; + + // FIXME: errors + let (nar_hash, nar_size) = nar_compute.get().unwrap(); + let nar_hash = Hash::Sha256(nar_hash.as_slice().try_into().unwrap()); + + // Confirm that the NAR Hash and Size are correct + if nar_hash.to_typed_base16() != existing_nar.nar_hash + || *nar_size != upload_info.nar_size + || *nar_size != existing_nar.nar_size as usize + { + return Err(ServerError::RequestError(anyhow!("Bad NAR Hash or Size"))); + } + + // Finally... + let txn = database + .begin() + .await + .map_err(ServerError::database_error)?; + + // Create a mapping granting the local cache access to the NAR + Object::delete_many() + .filter(object::Column::CacheId.eq(cache.id)) + .filter(object::Column::StorePathHash.eq(upload_info.store_path_hash.to_string())) + .exec(&txn) + .await + .map_err(ServerError::database_error)?; + Object::insert({ + let mut new_object = upload_info.to_active_model(); + new_object.cache_id = Set(cache.id); + new_object.nar_id = Set(existing_nar.id); + new_object.created_at = Set(Utc::now()); + new_object + }) + .exec(&txn) + .await + .map_err(ServerError::database_error)?; + + txn.commit().await.map_err(ServerError::database_error)?; + + // Ensure it's not unlocked earlier + drop(existing_nar); + + // TODO + Ok("Success".to_string()) +} + +/// Uploads a path when there is no matching NAR in the global cache. +/// +/// It's okay if some other client races to upload the same NAR before +/// us. The `nar` table can hold duplicate NARs which can be deduplicated +/// in a background process.
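+/// +/// Rough flow, as implemented below: the NAR is streamed through `CompressionStream` into a freshly-named `{uuid}.nar` object, a `nar` row is created in the `PendingUpload` state, and a `Finally` guard deletes both the file and the row if verification fails before the row is marked `Valid`.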
+async fn upload_path_new( + cache: cache::Model, + upload_info: UploadPathNarInfo, + stream: impl AsyncRead + Send + Unpin + 'static, + database: &DatabaseConnection, + state: &State, +) -> ServerResult<String> { + let compression_config = &state.config.compression; + let compression: Compression = compression_config.r#type.into(); + let level = compression_config.level(); + let compressor: CompressorFn<_> = match compression_config.r#type { + CompressionType::None => Box::new(|c| Box::new(c)), + CompressionType::Brotli => { + Box::new(move |s| Box::new(BrotliEncoder::with_quality(s, level))) + } + CompressionType::Zstd => Box::new(move |s| Box::new(ZstdEncoder::with_quality(s, level))), + CompressionType::Xz => Box::new(move |s| Box::new(XzEncoder::with_quality(s, level))), + }; + + let backend = state.storage().await?; + + let key = format!("{}.nar", Uuid::new_v4()); + + let remote_file = backend.make_db_reference(key.clone()).await?; + let remote_file_id = remote_file.remote_file_id(); + let nar_id = { + let nar_size_db = + i64::try_from(upload_info.nar_size).map_err(ServerError::request_error)?; + let model = nar::ActiveModel { + state: Set(NarState::PendingUpload), + compression: Set(compression.to_string()), + + // Untrusted data - To be confirmed later + nar_hash: Set(upload_info.nar_hash.to_typed_base16()), + nar_size: Set(nar_size_db), + + remote_file: Set(Json(remote_file)), + remote_file_id: Set(remote_file_id), + + created_at: Set(Utc::now()), + ..Default::default() + }; + + let insertion = Nar::insert(model) + .exec(database) + .await + .map_err(ServerError::database_error)?; + + insertion.last_insert_id + }; + + let cleanup = Finally::new({ + let database = database.clone(); + let nar_model = nar::ActiveModel { + id: Set(nar_id), + ..Default::default() + }; + let backend = backend.clone(); + let key = key.clone(); + + async move { + tracing::warn!("Error occurred - Cleaning up uploaded file and NAR entry"); + + if let Err(e) = backend.delete_file(key).await { + tracing::warn!("Failed to clean up failed upload: {}", e); + } + + if let Err(e) = Nar::delete(nar_model).exec(&database).await { + tracing::warn!("Failed to unregister failed NAR: {}", e); + } + } + }); + + let mut stream = CompressionStream::new(stream, compressor); + + // Stream the object to S3 + backend + .upload_file(key, stream.stream()) + .await + .map_err(ServerError::remote_file_error)?; + + // Confirm that the NAR Hash and Size are correct + // FIXME: errors + let (nar_hash, nar_size) = stream.nar_hash_and_size().unwrap(); + let (file_hash, file_size) = stream.file_hash_and_size().unwrap(); + + let nar_hash = Hash::Sha256(nar_hash.as_slice().try_into().unwrap()); + let file_hash = Hash::Sha256(file_hash.as_slice().try_into().unwrap()); + + if nar_hash != upload_info.nar_hash || *nar_size != upload_info.nar_size { + return Err(ServerError::RequestError(anyhow!("Bad NAR Hash or Size"))); + } + + // Finally...
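+ // Commit the confirmed file hash/size and the new object mapping in one transaction.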
+ let txn = database + .begin() + .await + .map_err(ServerError::database_error)?; + + // Update the file hash and size, and set the nar to valid + let file_size_db = i64::try_from(*file_size).map_err(ServerError::request_error)?; + Nar::update(nar::ActiveModel { + id: Set(nar_id), + state: Set(NarState::Valid), + file_hash: Set(Some(file_hash.to_typed_base16())), + file_size: Set(Some(file_size_db)), + ..Default::default() + }) + .exec(&txn) + .await + .map_err(ServerError::database_error)?; + + // Create a mapping granting the local cache access to the NAR + Object::delete_many() + .filter(object::Column::CacheId.eq(cache.id)) + .filter(object::Column::StorePathHash.eq(upload_info.store_path_hash.to_string())) + .exec(&txn) + .await + .map_err(ServerError::database_error)?; + Object::insert({ + let mut new_object = upload_info.to_active_model(); + new_object.cache_id = Set(cache.id); + new_object.nar_id = Set(nar_id); + new_object.created_at = Set(Utc::now()); + new_object + }) + .exec(&txn) + .await + .map_err(ServerError::database_error)?; + + txn.commit().await.map_err(ServerError::database_error)?; + + cleanup.cancel(); + + // TODO + Ok("Success".to_string()) +} + +impl CompressionStream { + fn new(stream: R, compressor: CompressorFn>>) -> Self + where + R: AsyncRead + Unpin + Send + 'static, + { + // compute NAR hash and size + let (stream, nar_compute) = StreamHasher::new(stream, Sha256::new()); + + // compress NAR + let stream = compressor(BufReader::new(stream)); + + // compute file hash and size + let (stream, file_compute) = StreamHasher::new(stream, Sha256::new()); + + Self { + stream: Box::new(stream), + nar_compute, + file_compute, + } + } + + /// Returns the stream of the compressed object. + fn stream(&mut self) -> &mut (impl AsyncRead + Unpin) { + &mut self.stream + } + + /// Returns the NAR hash and size. + /// + /// The hash is only finalized when the stream is fully read. + /// Otherwise, returns `None`. + fn nar_hash_and_size(&self) -> Option<&(DigestOutput, usize)> { + self.nar_compute.get() + } + + /// Returns the file hash and size. + /// + /// The hash is only finalized when the stream is fully read. + /// Otherwise, returns `None`. + fn file_hash_and_size(&self) -> Option<&(DigestOutput, usize)> { + self.file_compute.get() + } +} + +impl UploadPathNarInfoExt for UploadPathNarInfo { + fn to_active_model(&self) -> object::ActiveModel { + object::ActiveModel { + store_path_hash: Set(self.store_path_hash.to_string()), + store_path: Set(self.store_path.clone()), + references: Set(Json(self.references.clone())), + deriver: Set(self.deriver.clone()), + sigs: Set(Json(self.sigs.clone())), + ca: Set(self.ca.clone()), + ..Default::default() + } + } +} diff --git a/server/src/config-template.toml b/server/src/config-template.toml new file mode 100644 index 0000000..ea1de74 --- /dev/null +++ b/server/src/config-template.toml @@ -0,0 +1,105 @@ +# Socket address to listen on +listen = "[::]:8080" + +# Allowed `Host` headers +# +# This _must_ be configured for production use. If unconfigured or the +# list is empty, all `Host` headers are allowed. +allowed-hosts = [] + +# The canonical API endpoint of this server +# +# This is the endpoint exposed to clients in `cache-config` responses. +# +# This _must_ be configured for production use. If not configured, the +# API endpoint is synthesized from the client's `Host` header which may +# be insecure. +# +# The API endpoint _must_ end with a slash (e.g., `https://domain.tld/attic/` +# not `https://domain.tld/attic`). 
+#api-endpoint = "https://your.domain.tld/" + +# Whether to soft-delete caches +# +# If this is enabled, caches are soft-deleted instead of actually +# removed from the database. Note that soft-deleted caches cannot +# have their names reused as long as the original database records +# are there. +#soft-delete-caches = false + +# JWT signing token +# +# Set this to the Base64 encoding of some random data. +token-hs256-secret-base64 = "%token_hs256_secret_base64%" + +# Database connection +[database] +# Connection URL +# +# For production use it's recommended to use PostgreSQL. +url = "%database_url%" + +# Whether to enable sending of periodic heartbeat queries +# +# If enabled, a heartbeat query will be sent every minute +#heartbeat = false + +# File storage configuration +[storage] +# Storage type +# +# Can be "local" or "s3". +type = "local" + +# ## Local storage + +# The directory to store all files under +path = "%storage_path%" + +# ## S3 Storage (set type to "s3" and uncomment below) + +# The AWS region +#region = "us-east-1" + +# The name of the bucket +#bucket = "some-bucket" + +# Custom S3 endpoint +# +# Set this if you are using an S3-compatible object storage (e.g., Minio). +#endpoint = "https://xxx.r2.cloudflarestorage.com" + +# Credentials +# +# If unset, the credentials are read from the `AWS_ACCESS_KEY_ID` and +# `AWS_SECRET_ACCESS_KEY` environment variables. +#[storage.credentials] +# access_key_id = "" +# secret_access_key = "" + +# Compression +[compression] +# Compression type +# +# Can be "none", "brotli", "zstd", or "xz" +type = "zstd" + +# Compression level +#level = 8 + +# Garbage collection +[garbage-collection] +# The frequency to run garbage collection at +# +# By default it's 12 hours. You can use natural language +# to specify the interval, like "1 day". +# +# If zero, automatic garbage collection is disabled, but +# it can still be run manually with `atticd --mode garbage-collector-once`. +interval = "12 hours" + +# Default retention period +# +# Zero (default) means time-based garbage-collection is +# disabled by default. You can enable it on a per-cache basis. +#default-retention-period = "6 months" diff --git a/server/src/config.rs b/server/src/config.rs new file mode 100644 index 0000000..ee6f88c --- /dev/null +++ b/server/src/config.rs @@ -0,0 +1,273 @@ +//! Server configuration. + +use std::net::SocketAddr; +use std::path::{Path, PathBuf}; +use std::time::Duration; + +use async_compression::Level as CompressionLevel; +use derivative::Derivative; +use serde::{de, Deserialize}; +use xdg::BaseDirectories; + +use crate::access::{JwtDecodingKey, JwtEncodingKey}; +use crate::narinfo::Compression as NixCompression; +use crate::storage::{LocalStorageConfig, S3StorageConfig}; + +/// Application prefix in XDG base directories. +/// +/// This will be concatenated into `$XDG_CONFIG_HOME/attic`. +const XDG_PREFIX: &str = "attic"; + +#[derive(Clone)] +pub struct JwtKeys { + pub decoding: JwtDecodingKey, + pub encoding: JwtEncodingKey, +} + +/// Configuration for the Attic Server. +#[derive(Clone, Derivative, Deserialize)] +#[derivative(Debug)] +#[serde(deny_unknown_fields)] +pub struct Config { + /// Socket address to listen on. + #[serde(default = "default_listen_address")] + pub listen: SocketAddr, + + /// Allowed `Host` headers. + /// + /// This _must_ be configured for production use. If unconfigured or the + /// list is empty, all `Host` headers are allowed.
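+ /// + /// e.g. `allowed-hosts = ["attic.example.com"]` (a hypothetical domain).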
+ #[serde(rename = "allowed-hosts")] + #[serde(default = "Vec::new")] + pub allowed_hosts: Vec, + + /// The canonical API endpoint of this server. + /// + /// This is the endpoint exposed to clients in `cache-config` responses. + /// + /// This _must_ be configured for production use. If not configured, the + /// API endpoint is synthesized from the client's `Host` header which may + /// be insecure. + /// + /// The API endpoint _must_ end with a slash (e.g., `https://domain.tld/attic/` + /// not `https://domain.tld/attic`). + #[serde(rename = "api-endpoint")] + pub api_endpoint: Option, + + /// Whether to soft-delete caches. + /// + /// If this is enabled, caches are soft-deleted instead of actually + /// removed from the database. Note that soft-deleted caches cannot + /// have their names reused as long as the original database records + /// are there. + #[serde(rename = "soft-delete-caches")] + #[serde(default = "default_soft_delete_caches")] + pub soft_delete_caches: bool, + + /// Database connection. + pub database: DatabaseConfig, + + /// Storage. + pub storage: StorageConfig, + + /// Compression. + #[serde(default = "Default::default")] + pub compression: CompressionConfig, + + /// Garbage collection. + #[serde(rename = "garbage-collection")] + #[serde(default = "Default::default")] + pub garbage_collection: GarbageCollectionConfig, + + /// JSON Web Token HMAC secret. + /// + /// Set this to the base64 encoding of a randomly generated secret. + #[serde(rename = "token-hs256-secret-base64")] + #[serde(deserialize_with = "deserialize_base64_jwt_secret")] + #[derivative(Debug = "ignore")] + pub token_hs256_secret: JwtKeys, +} + +/// Database connection configuration. +#[derive(Debug, Clone, Deserialize)] +pub struct DatabaseConfig { + /// Connection URL. + pub url: String, + + /// Whether to enable sending of periodic heartbeat queries. + /// + /// If enabled, a heartbeat query will be sent every minute. + #[serde(default = "default_db_heartbeat")] + pub heartbeat: bool, +} + +/// File storage configuration. +#[derive(Debug, Clone, Deserialize)] +#[serde(tag = "type")] +pub enum StorageConfig { + /// Local file storage. + #[serde(rename = "local")] + Local(LocalStorageConfig), + + /// S3 storage. + #[serde(rename = "s3")] + S3(S3StorageConfig), +} + +/// Compression configuration. +#[derive(Debug, Clone, Deserialize)] +pub struct CompressionConfig { + /// Compression type. + pub r#type: CompressionType, + + /// Compression level. + /// + /// If unspecified, Attic will choose a default one. + pub level: Option, +} + +/// Compression type. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize)] +pub enum CompressionType { + /// No compression. + #[serde(rename = "none")] + None, + + /// Brotli. + #[serde(rename = "brotli")] + Brotli, + + /// ZSTD. + #[serde(rename = "zstd")] + Zstd, + + /// XZ. + #[serde(rename = "xz")] + Xz, +} + +/// Garbage collection config. +#[derive(Debug, Clone, Deserialize)] +pub struct GarbageCollectionConfig { + /// The frequency to run garbage collection at. + /// + /// If zero, automatic garbage collection is disabled, but + /// it can still be run manually with `atticd --mode garbage-collector-once`. + #[serde(with = "humantime_serde", default = "default_gc_interval")] + pub interval: Duration, + + /// The default retention period of unaccessed objects. + /// + /// Objects are subject to garbage collection if both the + /// `created_at` and `last_accessed_at` timestamps are older + /// than the retention period. 
+ /// + /// Zero (default) means time-based garbage-collection is + /// disabled by default. You can enable it on a per-cache basis. + #[serde(rename = "default-retention-period")] + #[serde(with = "humantime_serde", default = "default_default_retention_period")] + pub default_retention_period: Duration, +} + +impl CompressionConfig { + pub fn level(&self) -> CompressionLevel { + if let Some(level) = self.level { + return CompressionLevel::Precise(level); + } + + match self.r#type { + CompressionType::Brotli => CompressionLevel::Precise(5), + CompressionType::Zstd => CompressionLevel::Precise(8), + CompressionType::Xz => CompressionLevel::Precise(2), + _ => CompressionLevel::Default, + } + } +} + +impl Default for CompressionConfig { + fn default() -> Self { + Self { + r#type: CompressionType::Zstd, + level: None, + } + } +} + +impl From for NixCompression { + fn from(t: CompressionType) -> Self { + match t { + CompressionType::None => NixCompression::None, + CompressionType::Brotli => NixCompression::Brotli, + CompressionType::Zstd => NixCompression::Zstd, + CompressionType::Xz => NixCompression::Xz, + } + } +} + +impl Default for GarbageCollectionConfig { + fn default() -> Self { + Self { + interval: Duration::from_secs(43200), + default_retention_period: Duration::ZERO, + } + } +} + +fn deserialize_base64_jwt_secret<'de, D>(deserializer: D) -> Result +where + D: de::Deserializer<'de>, +{ + use de::Error; + + let s = String::deserialize(deserializer)?; + let decoding = JwtDecodingKey::from_base64_secret(&s).map_err(Error::custom)?; + let encoding = JwtEncodingKey::from_base64_secret(&s).map_err(Error::custom)?; + + Ok(JwtKeys { decoding, encoding }) +} + +fn default_listen_address() -> SocketAddr { + "[::]:8080".parse().unwrap() +} + +fn default_db_heartbeat() -> bool { + false +} + +fn default_soft_delete_caches() -> bool { + false +} + +fn default_gc_interval() -> Duration { + Duration::from_secs(43200) +} + +fn default_default_retention_period() -> Duration { + Duration::ZERO +} + +pub fn load_config_from_path(path: &Path) -> Config { + tracing::info!("Using configurations: {:?}", path); + + let config = std::fs::read_to_string(path).expect("Failed to read configuration file"); + toml::from_str(&config).expect("Invalid configuration file") +} + +pub fn load_config_from_str(s: &str) -> Config { + tracing::info!("Using configurations from environment variable"); + toml::from_str(s).expect("Invalid configuration file") +} + +pub fn get_xdg_config_path() -> anyhow::Result { + let xdg_dirs = BaseDirectories::with_prefix(XDG_PREFIX)?; + let config_path = xdg_dirs.place_config_file("server.toml")?; + + Ok(config_path) +} + +pub fn get_xdg_data_path() -> anyhow::Result { + let xdg_dirs = BaseDirectories::with_prefix(XDG_PREFIX)?; + let data_path = xdg_dirs.create_data_directory("")?; + + Ok(data_path) +} diff --git a/server/src/database/entity/cache.rs b/server/src/database/entity/cache.rs new file mode 100644 index 0000000..8c51299 --- /dev/null +++ b/server/src/database/entity/cache.rs @@ -0,0 +1,72 @@ +//! A binary cache. + +use sea_orm::entity::prelude::*; + +use super::Json; +use attic::error::AtticResult; +use attic::signing::NixKeypair; + +pub type CacheModel = Model; + +/// A binary cache. +#[derive(Debug, Clone, PartialEq, Eq, DeriveEntityModel)] +#[sea_orm(table_name = "cache")] +pub struct Model { + /// Unique numeric ID of the cache. + #[sea_orm(primary_key)] + pub id: i64, + + /// Unique name of the cache. 
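+ /// + /// Limited to 50 characters at the database level (see the column type below).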
diff --git a/server/src/database/entity/cache.rs b/server/src/database/entity/cache.rs
new file mode 100644
index 0000000..8c51299
--- /dev/null
+++ b/server/src/database/entity/cache.rs
@@ -0,0 +1,72 @@
+//! A binary cache.
+
+use sea_orm::entity::prelude::*;
+
+use super::Json;
+use attic::error::AtticResult;
+use attic::signing::NixKeypair;
+
+pub type CacheModel = Model;
+
+/// A binary cache.
+#[derive(Debug, Clone, PartialEq, Eq, DeriveEntityModel)]
+#[sea_orm(table_name = "cache")]
+pub struct Model {
+    /// Unique numeric ID of the cache.
+    #[sea_orm(primary_key)]
+    pub id: i64,
+
+    /// Unique name of the cache.
+    #[sea_orm(column_type = "String(Some(50))", unique, indexed)]
+    pub name: String,
+
+    /// Signing keypair for the cache.
+    pub keypair: String,
+
+    /// Whether the cache is public or not.
+    ///
+    /// Anonymous clients are implicitly granted the "pull"
+    /// permission to public caches.
+    pub is_public: bool,
+
+    /// The Nix store path this binary cache uses.
+    pub store_dir: String,
+
+    /// The priority of the binary cache.
+    ///
+    /// A lower number denotes a higher priority.
+    /// `cache.nixos.org` has a priority of 40.
+    pub priority: i32,
+
+    /// A list of signing key names for upstream caches.
+    pub upstream_cache_key_names: Json<Vec<String>>,
+
+    /// Timestamp when the binary cache was created.
+    pub created_at: ChronoDateTimeUtc,
+
+    /// Timestamp when the binary cache was deleted.
+    pub deleted_at: Option<ChronoDateTimeUtc>,
+
+    /// The retention period of the cache, in seconds.
+    pub retention_period: Option<i32>,
+}
+
+#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
+pub enum Relation {
+    #[sea_orm(has_many = "super::object::Entity")]
+    Object,
+}
+
+impl Model {
+    pub fn keypair(&self) -> AtticResult<NixKeypair> {
+        NixKeypair::from_str(&self.keypair)
+    }
+}
+
+impl Related<super::object::Entity> for Entity {
+    fn to() -> RelationDef {
+        Relation::Object.def()
+    }
+}
+
+impl ActiveModelBehavior for ActiveModel {}
diff --git a/server/src/database/entity/mod.rs b/server/src/database/entity/mod.rs
new file mode 100644
index 0000000..6f6acb3
--- /dev/null
+++ b/server/src/database/entity/mod.rs
@@ -0,0 +1,56 @@
+//! Database entities.
+//!
+//! We use SeaORM and target PostgreSQL (production) and SQLite (development).
+
+pub mod cache;
+pub mod nar;
+pub mod object;
+
+use sea_orm::entity::Value;
+use sea_orm::sea_query::{ArrayType, ColumnType, ValueType, ValueTypeErr};
+use sea_orm::{DbErr, QueryResult, TryGetError, TryGetable};
+use serde::{de::DeserializeOwned, Deserialize, Serialize};
+
+// A more generic version of https://github.com/SeaQL/sea-orm/pull/783
+
+/// A value that is stored in the database as JSON.
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
+#[serde(transparent)]
+pub struct Json<T>(pub T);
+
+impl<T: Serialize> From<Json<T>> for Value {
+    fn from(value: Json<T>) -> Self {
+        let opt = serde_json::to_string(&value).ok().map(Box::new);
+
+        Value::String(opt)
+    }
+}
+
+impl<T: DeserializeOwned> TryGetable for Json<T> {
+    fn try_get(res: &QueryResult, pre: &str, col: &str) -> Result<Self, TryGetError> {
+        let json_str: String = res.try_get(pre, col).map_err(TryGetError::DbErr)?;
+
+        serde_json::from_str(&json_str).map_err(|e| TryGetError::DbErr(DbErr::Json(e.to_string())))
+    }
+}
+
+impl<T: Serialize + DeserializeOwned> ValueType for Json<T> {
+    fn try_from(v: Value) -> Result<Self, ValueTypeErr> {
+        match v {
+            Value::String(Some(x)) => Ok(Json(serde_json::from_str(&x).map_err(|_| ValueTypeErr)?)),
+            _ => Err(ValueTypeErr),
+        }
+    }
+
+    fn type_name() -> String {
+        stringify!(Json).to_owned()
+    }
+
+    fn column_type() -> ColumnType {
+        ColumnType::String(None)
+    }
+
+    fn array_type() -> ArrayType {
+        ArrayType::String
+    }
+}
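The `Json<T>` wrapper above keeps list-valued fields in a plain string column. A quick sketch (not part of the patch) of the round trip through a database `Value`:

```rust
// Sketch: a Json<Vec<String>> is stored as its serde_json string form.
use sea_orm::entity::Value;

fn json_round_trip() {
    let sigs: Json<Vec<String>> = Json(vec!["cache.example.org-1:aaaa".to_string()]);

    // Writing: `From<Json<T>> for Value` serializes to a JSON string,
    // e.g. Value::String(Some("[\"cache.example.org-1:aaaa\"]")).
    let stored: Value = sigs.into();
    assert!(matches!(stored, Value::String(Some(_))));
    // Reading goes through `TryGetable`, which parses the string back.
}
```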
+ #[sea_orm(string_value = "V")] + Valid, + + /// The NAR is a pending upload. + /// + /// The NAR and file hashes aren't trusted and may + /// not be available. + #[sea_orm(string_value = "P")] + PendingUpload, + + /// The NAR can be deleted because it already exists. + /// + /// This state can be transitioned into from `PendingUpload` + /// if some other client completes uploading the same NAR + /// faster. + #[sea_orm(string_value = "C")] + ConfirmedDeduplicated, + + /// The NAR is being deleted. + /// + /// This row will be deleted shortly. + #[sea_orm(string_value = "D")] + Deleted, +} + +/// A content-addressed NAR in the global cache. +/// +/// A NAR without `nix-store --export` metadata is context-free, +/// meaning that it's not associated with a store path and only +/// depends on its contents. +#[derive(Debug, Clone, PartialEq, Eq, DeriveEntityModel)] +#[sea_orm(table_name = "nar")] +pub struct Model { + /// Unique numeric ID of the NAR. + #[sea_orm(primary_key)] + pub id: i64, + + /// The state of the NAR archive. + state: NarState, + + /// The hash of the NAR archive. + /// + /// This always begins with "sha256:" with the hash in the + /// hexadecimal format. + /// + /// The global cache may have several NARs with the same NAR + /// hash: + /// + /// - Unconfirmed uploads from clients + /// - Global deduplication is turned off + #[sea_orm(indexed)] + pub nar_hash: String, + + /// The size of the NAR archive. + pub nar_size: i64, + + /// The hash of the compressed file. + /// + /// This always begins with "sha256:" with the hash in the + /// hexadecimal format. + /// + /// This field may not be available if the file hashes aren't + /// confirmed. + pub file_hash: Option, + + /// The size of the compressed file. + /// + /// This field may not be available if the file hashes aren't + /// confirmed. + pub file_size: Option, + + /// The type of compression in use. + #[sea_orm(column_type = "String(Some(10))")] + pub compression: String, + + /// The remote file backing this NAR. + /// + /// Currently we only support S3-compatible providers, though it + /// should be easy to add in support for other forms of object storage + /// or even local storage. + pub remote_file: Json, + + /// Unique string identifying the remote file. + #[sea_orm(unique)] + pub remote_file_id: String, + + /// Number of processes holding this NAR. + /// + /// This is for preventing garbage collection of NARs when + /// there is a pending upload that can be deduplicated and + /// there is no existing object references. + pub holders_count: i32, + + /// Timestamp when the NAR is created. + pub created_at: ChronoDateTimeUtc, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation { + #[sea_orm(has_many = "super::object::Entity")] + Object, +} + +impl ActiveModelBehavior for ActiveModel {} diff --git a/server/src/database/entity/object.rs b/server/src/database/entity/object.rs new file mode 100644 index 0000000..ec36fcc --- /dev/null +++ b/server/src/database/entity/object.rs @@ -0,0 +1,128 @@ +//! An object in a local cache. +//! +//! It's backed by a NAR in the global cache. + +use std::path::PathBuf; +use std::str::FromStr; + +use sea_orm::entity::prelude::*; + +use super::nar::NarModel; +use super::Json; +use crate::error::{ServerError, ServerResult}; +use crate::narinfo::{Compression, NarInfo}; +use attic::hash::Hash; + +pub type ObjectModel = Model; + +/// An object in a binary cache. 
diff --git a/server/src/database/entity/object.rs b/server/src/database/entity/object.rs
new file mode 100644
index 0000000..ec36fcc
--- /dev/null
+++ b/server/src/database/entity/object.rs
@@ -0,0 +1,128 @@
+//! An object in a local cache.
+//!
+//! It's backed by a NAR in the global cache.
+
+use std::path::PathBuf;
+use std::str::FromStr;
+
+use sea_orm::entity::prelude::*;
+
+use super::nar::NarModel;
+use super::Json;
+use crate::error::{ServerError, ServerResult};
+use crate::narinfo::{Compression, NarInfo};
+use attic::hash::Hash;
+
+pub type ObjectModel = Model;
+
+/// An object in a binary cache.
+#[derive(Debug, Clone, PartialEq, Eq, DeriveEntityModel)]
+#[sea_orm(table_name = "object")]
+pub struct Model {
+    /// Unique numeric ID of the object.
+    #[sea_orm(primary_key)]
+    pub id: i64,
+
+    /// ID of the binary cache the object belongs to.
+    #[sea_orm(indexed)]
+    pub cache_id: i64,
+
+    /// ID of the NAR this object points to.
+    pub nar_id: i64,
+
+    /// The hash portion of the store path.
+    #[sea_orm(column_type = "String(Some(32))", indexed)]
+    pub store_path_hash: String,
+
+    /// The full store path being cached, including the store directory.
+    pub store_path: String,
+
+    /// Other store paths this object directly references.
+    pub references: Json<Vec<String>>,
+
+    /// The system this derivation is built for.
+    pub system: Option<String>,
+
+    /// The derivation that produced this object.
+    pub deriver: Option<String>,
+
+    /// Client-supplied signatures of this object.
+    pub sigs: Json<Vec<String>>,
+
+    /// The content address of this object.
+    ///
+    /// Technically this might belong to the NAR table since it's
+    /// an inherent property of the content, but there are multiple
+    /// formats for the CA and the feature isn't stable. So for now we
+    /// simply treat it as an untrusted string.
+    pub ca: Option<String>,
+
+    /// Timestamp when the object was created.
+    pub created_at: ChronoDateTimeUtc,
+
+    /// Timestamp when the object was last accessed.
+    pub last_accessed_at: Option<ChronoDateTimeUtc>,
+}
+
+#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
+pub enum Relation {
+    #[sea_orm(
+        belongs_to = "super::cache::Entity",
+        from = "Column::CacheId",
+        to = "super::cache::Column::Id"
+    )]
+    Cache,
+
+    #[sea_orm(
+        belongs_to = "super::nar::Entity",
+        from = "Column::NarId",
+        to = "super::nar::Column::Id"
+    )]
+    Nar,
+}
+
+impl Model {
+    /// Converts this object to a NarInfo.
+    pub fn to_nar_info(&self, nar: &NarModel) -> ServerResult<NarInfo> {
+        // FIXME: Return Err if file_hash and file_size don't exist
+        let file_size = nar
+            .file_size
+            .unwrap()
+            .try_into()
+            .map_err(ServerError::database_error)?;
+        let nar_size = nar
+            .nar_size
+            .try_into()
+            .map_err(ServerError::database_error)?;
+
+        Ok(NarInfo {
+            store_path: PathBuf::from(self.store_path.to_owned()),
+            url: format!("nar/{}.nar", self.store_path_hash.as_str()),
+
+            compression: Compression::from_str(&nar.compression)?,
+            file_hash: Hash::from_typed(nar.file_hash.as_ref().unwrap())?,
+            file_size,
+            nar_hash: Hash::from_typed(&nar.nar_hash)?,
+            nar_size,
+            system: self.system.to_owned(),
+            references: self.references.0.to_owned(),
+            deriver: self.deriver.to_owned(),
+            signature: None,
+            ca: self.ca.to_owned(),
+        })
+    }
+}
+
+impl Related<super::cache::Entity> for Entity {
+    fn to() -> RelationDef {
+        Relation::Cache.def()
+    }
+}
+
+impl Related<super::nar::Entity> for Entity {
+    fn to() -> RelationDef {
+        Relation::Nar.def()
+    }
+}
+
+impl ActiveModelBehavior for ActiveModel {}
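A sketch (not part of the patch) of how `to_nar_info` is meant to be combined with the cache's `keypair()` helper to produce a signed `.narinfo`; `signed_narinfo` is a hypothetical free function, not an API defined in this patch:

```rust
// Sketch: assemble and sign a .narinfo from a fetched row triple,
// assuming ObjectModel, CacheModel, and NarModel from the entities above.
fn signed_narinfo(
    object: &ObjectModel,
    cache: &CacheModel,
    nar: &NarModel,
) -> ServerResult<NarInfo> {
    let mut narinfo = object.to_nar_info(nar)?;
    let keypair = cache.keypair()?; // parse the cache's stored signing key
    narinfo.sign(&keypair);         // fills in the Sig field
    Ok(narinfo)
}
```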
diff --git a/server/src/database/migration/m20221227_000001_create_cache_table.rs b/server/src/database/migration/m20221227_000001_create_cache_table.rs
new file mode 100644
index 0000000..6ab20c7
--- /dev/null
+++ b/server/src/database/migration/m20221227_000001_create_cache_table.rs
@@ -0,0 +1,67 @@
+use sea_orm_migration::prelude::*;
+
+use crate::database::entity::cache::*;
+
+pub struct Migration;
+
+impl MigrationName for Migration {
+    fn name(&self) -> &str {
+        "m20221227_000001_create_cache_table"
+    }
+}
+
+#[async_trait::async_trait]
+impl MigrationTrait for Migration {
+    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
+        manager
+            .create_table(
+                Table::create()
+                    .table(Entity)
+                    .if_not_exists()
+                    .col(
+                        ColumnDef::new(Column::Id)
+                            .big_integer()
+                            .not_null()
+                            .auto_increment()
+                            .primary_key(),
+                    )
+                    .col(
+                        ColumnDef::new(Column::Name)
+                            .string_len(50)
+                            .not_null()
+                            .unique_key(),
+                    )
+                    .col(ColumnDef::new(Column::Keypair).string().not_null())
+                    .col(ColumnDef::new(Column::IsPublic).boolean().not_null())
+                    .col(ColumnDef::new(Column::StoreDir).string().not_null())
+                    .col(ColumnDef::new(Column::Priority).integer().not_null())
+                    .col(
+                        ColumnDef::new(Column::UpstreamCacheKeyNames)
+                            .string()
+                            .not_null(),
+                    )
+                    .col(
+                        ColumnDef::new(Column::CreatedAt)
+                            .timestamp_with_time_zone()
+                            .not_null(),
+                    )
+                    .col(
+                        ColumnDef::new(Column::DeletedAt)
+                            .timestamp_with_time_zone()
+                            .null(),
+                    )
+                    .to_owned(),
+            )
+            .await?;
+
+        manager
+            .create_index(
+                Index::create()
+                    .name("idx-cache-name")
+                    .table(Entity)
+                    .col(Column::Name)
+                    .to_owned(),
+            )
+            .await
+    }
+}
diff --git a/server/src/database/migration/m20221227_000002_create_nar_table.rs b/server/src/database/migration/m20221227_000002_create_nar_table.rs
new file mode 100644
index 0000000..bcc2dd8
--- /dev/null
+++ b/server/src/database/migration/m20221227_000002_create_nar_table.rs
@@ -0,0 +1,71 @@
+use sea_orm_migration::prelude::*;
+
+use crate::database::entity::nar::*;
+
+pub struct Migration;
+
+impl MigrationName for Migration {
+    fn name(&self) -> &str {
+        "m20221227_000002_create_nar_table"
+    }
+}
+
+#[async_trait::async_trait]
+impl MigrationTrait for Migration {
+    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
+        manager
+            .create_table(
+                Table::create()
+                    .table(Entity)
+                    .if_not_exists()
+                    .col(
+                        ColumnDef::new(Column::Id)
+                            .big_integer()
+                            .not_null()
+                            .auto_increment()
+                            .primary_key(),
+                    )
+                    .col(
+                        ColumnDef::new(Column::State)
+                            .r#char()
+                            .char_len(1)
+                            .not_null(),
+                    )
+                    .col(ColumnDef::new(Column::NarHash).string().not_null())
+                    .col(ColumnDef::new(Column::NarSize).big_integer().not_null())
+                    .col(ColumnDef::new(Column::FileHash).string().null())
+                    .col(ColumnDef::new(Column::FileSize).big_integer().null())
+                    .col(ColumnDef::new(Column::Compression).string().not_null())
+                    .col(ColumnDef::new(Column::RemoteFile).string().not_null())
+                    .col(
+                        ColumnDef::new(Column::RemoteFileId)
+                            .string()
+                            .not_null()
+                            .unique_key(),
+                    )
+                    .col(
+                        ColumnDef::new(Column::HoldersCount)
+                            .integer()
+                            .not_null()
+                            .default(0),
+                    )
+                    .col(
+                        ColumnDef::new(Column::CreatedAt)
+                            .timestamp_with_time_zone()
+                            .not_null(),
+                    )
+                    .to_owned(),
+            )
+            .await?;
+
+        manager
+            .create_index(
+                Index::create()
+                    .name("idx-nar-nar-hash")
+                    .table(Entity)
+                    .col(Column::NarHash)
+                    .to_owned(),
+            )
+            .await
+    }
+}
diff --git a/server/src/database/migration/m20221227_000003_create_object_table.rs b/server/src/database/migration/m20221227_000003_create_object_table.rs
new file mode 100644
index 0000000..6be34f4
--- /dev/null
+++ b/server/src/database/migration/m20221227_000003_create_object_table.rs
@@ -0,0 +1,82 @@
+use sea_orm_migration::prelude::*;
+
+use crate::database::entity::cache;
+use crate::database::entity::nar;
+use crate::database::entity::object::*;
+
+pub struct Migration;
+
+impl MigrationName for Migration {
+    fn name(&self) -> &str {
+        "m20221227_000003_create_object_table"
+    }
+}
+
+#[async_trait::async_trait]
+impl MigrationTrait for Migration {
+    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
+        manager
+            .create_table(
+                Table::create()
+                    .table(Entity)
+                    .if_not_exists()
+                    .col(
+                        ColumnDef::new(Column::Id)
+                            .big_integer()
+                            .not_null()
+                            .auto_increment()
+                            .primary_key(),
+ ) + .col(ColumnDef::new(Column::CacheId).big_integer().not_null()) + .col(ColumnDef::new(Column::NarId).big_integer().not_null()) + .col( + ColumnDef::new(Column::StorePathHash) + .string_len(32) + .not_null(), + ) + .col(ColumnDef::new(Column::StorePath).string().not_null()) + .col(ColumnDef::new(Column::References).string().not_null()) + .col(ColumnDef::new(Column::System).string()) + .col(ColumnDef::new(Column::Deriver).string()) + .col(ColumnDef::new(Column::Sigs).string().not_null()) + .col(ColumnDef::new(Column::Ca).string()) + .col( + ColumnDef::new(Column::CreatedAt) + .timestamp_with_time_zone() + .not_null(), + ) + .foreign_key( + ForeignKeyCreateStatement::new() + .name("fk_object_cache") + .from_tbl(Entity) + .from_col(Column::CacheId) + .to_tbl(cache::Entity) + .to_col(cache::Column::Id) + .on_delete(ForeignKeyAction::Cascade), + ) + .foreign_key( + ForeignKeyCreateStatement::new() + .name("fk_object_nar") + .from_tbl(Entity) + .from_col(Column::NarId) + .to_tbl(nar::Entity) + .to_col(nar::Column::Id) + .on_delete(ForeignKeyAction::Cascade), + ) + .to_owned(), + ) + .await?; + + manager + .create_index( + Index::create() + .name("idx-object-cache-hash") + .table(Entity) + .col(Column::CacheId) + .col(Column::StorePathHash) + .unique() + .to_owned(), + ) + .await + } +} diff --git a/server/src/database/migration/m20221227_000004_add_object_last_accessed.rs b/server/src/database/migration/m20221227_000004_add_object_last_accessed.rs new file mode 100644 index 0000000..e12c37f --- /dev/null +++ b/server/src/database/migration/m20221227_000004_add_object_last_accessed.rs @@ -0,0 +1,31 @@ +use sea_orm_migration::prelude::*; + +use crate::database::entity::object::*; + +pub struct Migration; + +impl MigrationName for Migration { + fn name(&self) -> &str { + "m20221227_000004_add_object_last_accessed" + } +} + +#[async_trait::async_trait] +impl MigrationTrait for Migration { + async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { + manager + .alter_table( + Table::alter() + .table(Entity) + .add_column( + ColumnDef::new(Column::LastAccessedAt) + .timestamp_with_time_zone() + .null(), + ) + .to_owned(), + ) + .await?; + + Ok(()) + } +} diff --git a/server/src/database/migration/m20221227_000005_add_cache_retention_period.rs b/server/src/database/migration/m20221227_000005_add_cache_retention_period.rs new file mode 100644 index 0000000..eb8894f --- /dev/null +++ b/server/src/database/migration/m20221227_000005_add_cache_retention_period.rs @@ -0,0 +1,27 @@ +use sea_orm_migration::prelude::*; + +use crate::database::entity::cache::*; + +pub struct Migration; + +impl MigrationName for Migration { + fn name(&self) -> &str { + "m20221227_000005_add_cache_retention_period" + } +} + +#[async_trait::async_trait] +impl MigrationTrait for Migration { + async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { + manager + .alter_table( + Table::alter() + .table(Entity) + .add_column(ColumnDef::new(Column::RetentionPeriod).integer().null()) + .to_owned(), + ) + .await?; + + Ok(()) + } +} diff --git a/server/src/database/migration/mod.rs b/server/src/database/migration/mod.rs new file mode 100644 index 0000000..2146e3d --- /dev/null +++ b/server/src/database/migration/mod.rs @@ -0,0 +1,24 @@ +//! Database migrations. 
+
+pub use sea_orm_migration::*;
+
+mod m20221227_000001_create_cache_table;
+mod m20221227_000002_create_nar_table;
+mod m20221227_000003_create_object_table;
+mod m20221227_000004_add_object_last_accessed;
+mod m20221227_000005_add_cache_retention_period;
+
+pub struct Migrator;
+
+#[async_trait::async_trait]
+impl MigratorTrait for Migrator {
+    fn migrations() -> Vec<Box<dyn MigrationTrait>> {
+        vec![
+            Box::new(m20221227_000001_create_cache_table::Migration),
+            Box::new(m20221227_000002_create_nar_table::Migration),
+            Box::new(m20221227_000003_create_object_table::Migration),
+            Box::new(m20221227_000004_add_object_last_accessed::Migration),
+            Box::new(m20221227_000005_add_cache_retention_period::Migration),
+        ]
+    }
+}
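The `Migrator` above is driven through SeaORM's `MigratorTrait`. A small sketch (not part of the patch) of applying all pending migrations at startup, mirroring what `run_migrations` in `server/src/lib.rs` does later in this patch:

```rust
// Sketch: connect and bring the schema up to date.
use sea_orm::Database;

use crate::database::migration::{Migrator, MigratorTrait};

async fn migrate(url: &str) -> anyhow::Result<()> {
    let db = Database::connect(url).await?;
    Migrator::up(&db, None).await?; // None = apply every pending migration
    Ok(())
}
```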
diff --git a/server/src/database/mod.rs b/server/src/database/mod.rs
new file mode 100644
index 0000000..0c956c1
--- /dev/null
+++ b/server/src/database/mod.rs
@@ -0,0 +1,194 @@
+pub mod entity;
+pub mod migration;
+
+use std::ops::Deref;
+
+use async_trait::async_trait;
+use chrono::Utc;
+use sea_orm::entity::prelude::*;
+use sea_orm::entity::Iterable as EnumIterable;
+use sea_orm::query::{JoinType, QuerySelect, QueryTrait};
+use sea_orm::sea_query::{Expr, LockBehavior, LockType, Query, Value};
+use sea_orm::{ActiveValue::Set, ConnectionTrait, DatabaseConnection, FromQueryResult};
+use tokio::task;
+
+use crate::error::{ServerError, ServerResult};
+use attic::cache::CacheName;
+use attic::hash::Hash;
+use attic::nix_store::StorePathHash;
+use entity::cache::{self, CacheModel, Entity as Cache};
+use entity::nar::{self, Entity as Nar, NarModel, NarState};
+use entity::object::{self, Entity as Object, ObjectModel};
+
+const SELECT_OBJECT: &str = "O_";
+const SELECT_CACHE: &str = "C_";
+const SELECT_NAR: &str = "N_";
+
+#[async_trait]
+pub trait AtticDatabase: Send + Sync {
+    /// Retrieves an object in a binary cache by its store path hash.
+    async fn find_object_by_store_path_hash(
+        &self,
+        cache: &CacheName,
+        store_path_hash: &StorePathHash,
+    ) -> ServerResult<(ObjectModel, CacheModel, NarModel)>;
+
+    /// Retrieves a binary cache.
+    async fn find_cache(&self, cache: &CacheName) -> ServerResult<CacheModel>;
+
+    /// Retrieves and locks a valid NAR matching a NAR hash.
+    async fn find_and_lock_nar(&self, nar_hash: &Hash) -> ServerResult<Option<NarGuard>>;
+
+    /// Bumps the last accessed timestamp of an object.
+    async fn bump_object_last_accessed(&self, object_id: i64) -> ServerResult<()>;
+}
+
+pub struct NarGuard {
+    database: DatabaseConnection,
+    nar: NarModel,
+}
+
+fn prefix_column<E: EntityTrait, S: QuerySelect>(mut select: S, prefix: &str) -> S {
+    for col in <E::Column as EnumIterable>::iter() {
+        let alias = format!("{}{}", prefix, Iden::to_string(&col));
+        select = select.column_as(col, alias);
+    }
+    select
+}
+
+pub fn build_cache_object_nar_query() -> Select<Object> {
+    let mut query = Object::find()
+        .select_only()
+        .join(JoinType::LeftJoin, object::Relation::Cache.def())
+        .join(JoinType::LeftJoin, object::Relation::Nar.def());
+
+    query = prefix_column::<Object, _>(query, SELECT_OBJECT);
+    query = prefix_column::<Cache, _>(query, SELECT_CACHE);
+    query = prefix_column::<Nar, _>(query, SELECT_NAR);
+
+    query
+}
+
+#[async_trait]
+impl AtticDatabase for DatabaseConnection {
+    async fn find_object_by_store_path_hash(
+        &self,
+        cache: &CacheName,
+        store_path_hash: &StorePathHash,
+    ) -> ServerResult<(ObjectModel, CacheModel, NarModel)> {
+        let stmt = build_cache_object_nar_query()
+            .filter(cache::Column::Name.eq(cache.as_str()))
+            .filter(cache::Column::DeletedAt.is_null())
+            .filter(object::Column::StorePathHash.eq(store_path_hash.as_str()))
+            .filter(nar::Column::State.eq(NarState::Valid))
+            .limit(1)
+            .build(self.get_database_backend());
+
+        let result = self
+            .query_one(stmt)
+            .await
+            .map_err(ServerError::database_error)?
+            .ok_or(ServerError::NoSuchObject)?;
+
+        let object = object::Model::from_query_result(&result, SELECT_OBJECT)
+            .map_err(ServerError::database_error)?;
+        let cache = cache::Model::from_query_result(&result, SELECT_CACHE)
+            .map_err(ServerError::database_error)?;
+        let nar = nar::Model::from_query_result(&result, SELECT_NAR)
+            .map_err(ServerError::database_error)?;
+
+        Ok((object, cache, nar))
+    }
+
+    async fn find_cache(&self, cache: &CacheName) -> ServerResult<CacheModel> {
+        Cache::find()
+            .filter(cache::Column::Name.eq(cache.as_str()))
+            .filter(cache::Column::DeletedAt.is_null())
+            .one(self)
+            .await
+            .map_err(ServerError::database_error)?
+            .ok_or(ServerError::NoSuchCache)
+    }
+
+    async fn find_and_lock_nar(&self, nar_hash: &Hash) -> ServerResult<Option<NarGuard>> {
+        let one = Value::Unsigned(Some(1));
+        let matched_ids = Query::select()
+            .from(Nar)
+            .and_where(nar::Column::NarHash.eq(nar_hash.to_typed_base16()))
+            .and_where(nar::Column::State.eq(NarState::Valid))
+            .expr(Expr::col(nar::Column::Id))
+            .lock_with_behavior(LockType::Update, LockBehavior::SkipLocked)
+            .limit(1)
+            .to_owned();
+        let incr_holders = Query::update()
+            .table(Nar)
+            .values([(
+                nar::Column::HoldersCount,
+                Expr::col(nar::Column::HoldersCount).add(one),
+            )])
+            .and_where(nar::Column::Id.in_subquery(matched_ids))
+            .returning_all()
+            .to_owned();
+        let stmt = self.get_database_backend().build(&incr_holders);
+
+        let guard = nar::Model::find_by_statement(stmt)
+            .one(self)
+            .await
+            .map_err(ServerError::database_error)?
+            .map(|nar| NarGuard {
+                database: self.clone(),
+                nar,
+            });
+
+        Ok(guard)
+    }
+
+    async fn bump_object_last_accessed(&self, object_id: i64) -> ServerResult<()> {
+        let now = Utc::now();
+
+        Object::update(object::ActiveModel {
+            id: Set(object_id),
+            last_accessed_at: Set(Some(now)),
+            ..Default::default()
+        })
+        .exec(self)
+        .await
+        .map_err(ServerError::database_error)?;
+
+        Ok(())
+    }
+}
+
+impl Deref for NarGuard {
+    type Target = NarModel;
+
+    fn deref(&self) -> &Self::Target {
+        &self.nar
+    }
+}
+
+impl Drop for NarGuard {
+    fn drop(&mut self) {
+        let database = self.database.clone();
+        let nar_id = self.nar.id;
+
+        task::spawn(async move {
+            tracing::debug!("Unlocking NAR");
+
+            let one = Value::Unsigned(Some(1));
+            let decr_holders = Query::update()
+                .table(Nar)
+                .values([(
+                    nar::Column::HoldersCount,
+                    Expr::col(nar::Column::HoldersCount).sub(one),
+                )])
+                .and_where(nar::Column::Id.eq(nar_id))
+                .to_owned();
+            let stmt = database.get_database_backend().build(&decr_holders);
+
+            if let Err(e) = database.execute(stmt).await {
+                tracing::warn!("Failed to decrement holders count: {}", e);
+            }
+        });
+    }
+}
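A sketch (not part of the patch) of the intended usage of `find_and_lock_nar`: while the `NarGuard` is alive, the incremented `holders_count` keeps the garbage collector away from the row, and dropping the guard decrements it again in a background task. `dedup_upload` is a hypothetical caller:

```rust
// Sketch: deduplicating an upload against an existing valid NAR.
async fn dedup_upload(db: &DatabaseConnection, nar_hash: &Hash) -> ServerResult<()> {
    if let Some(guard) = db.find_and_lock_nar(nar_hash).await? {
        // Deduplicated path: point a new object row at the existing NAR
        // instead of storing the uploaded bytes again.
        let _existing_nar_id = guard.id; // fields available via Deref
        // ... create the object referencing the NAR ...
    } // guard dropped here; holders_count is decremented asynchronously
    Ok(())
}
```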
} => "InvalidCompressionType", + Self::AtticError(e) => e.name(), + Self::DatabaseError(_) => "DatabaseError", + Self::RemoteFileError(_) => "RemoteFileError", + Self::ManifestSerializationError(_) => "ManifestSerializationError", + Self::AccessError(_) => "AccessError", + Self::RequestError(_) => "RequestError", + } + } + + /// Returns a more restricted version of this error for a client without discovery + /// permissions. + pub fn into_no_discovery_permissions(self) -> Self { + match self { + Self::NoSuchCache => Self::Unauthorized, + Self::NoSuchObject => Self::Unauthorized, + Self::AccessError(_) => Self::Unauthorized, + + _ => self, + } + } + + /// Returns a version of this error for clients. + fn into_clients(self) -> Self { + match self { + Self::AccessError(super::access::Error::NoDiscoveryPermission) => Self::Unauthorized, + + Self::DatabaseError(_) => Self::InternalServerError, + Self::RemoteFileError(_) => Self::InternalServerError, + Self::ManifestSerializationError(_) => Self::InternalServerError, + + _ => self, + } + } + + fn http_status_code(&self) -> StatusCode { + match self { + Self::NotFound => StatusCode::NOT_FOUND, + Self::Unauthorized => StatusCode::UNAUTHORIZED, + Self::InternalServerError => StatusCode::INTERNAL_SERVER_ERROR, + + Self::AccessError(_) => StatusCode::FORBIDDEN, + Self::NoSuchCache => StatusCode::NOT_FOUND, + Self::NoSuchObject => StatusCode::NOT_FOUND, + Self::CacheAlreadyExists => StatusCode::BAD_REQUEST, + Self::ManifestSerializationError(_) => StatusCode::BAD_REQUEST, + Self::RequestError(_) => StatusCode::BAD_REQUEST, + Self::InvalidCompressionType { .. } => StatusCode::BAD_REQUEST, + _ => StatusCode::INTERNAL_SERVER_ERROR, + } + } +} + +impl StdError for ServerError {} + +impl From for ServerError { + fn from(error: AtticError) -> Self { + Self::AtticError(error) + } +} + +impl From for ServerError { + fn from(error: super::access::Error) -> Self { + Self::AccessError(error) + } +} + +impl IntoResponse for ServerError { + fn into_response(self) -> Response { + tracing::warn!("{:?}", self); + + // TODO: don't sanitize in dev mode + let sanitized = self.into_clients(); + + let status_code = sanitized.http_status_code(); + let error_response = ErrorResponse { + code: status_code.as_u16(), + message: sanitized.to_string(), + error: sanitized.name().to_string(), + }; + + (status_code, Json(error_response)).into_response() + } +} diff --git a/server/src/gc.rs b/server/src/gc.rs new file mode 100644 index 0000000..de66342 --- /dev/null +++ b/server/src/gc.rs @@ -0,0 +1,204 @@ +//! Garbage collection. + +use std::sync::Arc; +use std::time::Duration; + +use anyhow::{anyhow, Result}; +use chrono::{Duration as ChronoDuration, Utc}; +use futures::future::join_all; +use sea_orm::entity::prelude::*; +use sea_orm::query::QuerySelect; +use sea_orm::sea_query::{LockBehavior, LockType, Query}; +use sea_orm::{ConnectionTrait, FromQueryResult}; +use tokio::sync::Semaphore; +use tokio::time; +use tracing::instrument; + +use super::{State, StateInner}; +use crate::config::Config; +use crate::database::entity::cache::{self, Entity as Cache}; +use crate::database::entity::nar::{self, Entity as Nar, NarState}; +use crate::database::entity::object::{self, Entity as Object}; + +#[derive(Debug, FromQueryResult)] +struct CacheIdAndRetentionPeriod { + id: i64, + name: String, + retention_period: i32, +} + +/// Runs garbage collection periodically. 
+pub async fn run_garbage_collection(config: Config) {
+    let interval = config.garbage_collection.interval;
+
+    if interval == Duration::ZERO {
+        // disabled
+        return;
+    }
+
+    loop {
+        // We don't stop even if it errors
+        if let Err(e) = run_garbage_collection_once(config.clone()).await {
+            tracing::warn!("Garbage collection failed: {}", e);
+        }
+
+        time::sleep(interval).await;
+    }
+}
+
+/// Runs garbage collection once.
+#[instrument(skip_all)]
+pub async fn run_garbage_collection_once(config: Config) -> Result<()> {
+    tracing::info!("Running garbage collection...");
+
+    let state = StateInner::new(config).await;
+    run_time_based_garbage_collection(&state).await?;
+    run_reap_orphan_nars(&state).await?;
+
+    Ok(())
+}
+
+#[instrument(skip_all)]
+async fn run_time_based_garbage_collection(state: &State) -> Result<()> {
+    let db = state.database().await?;
+    let now = Utc::now();
+
+    let default_retention_period = state.config.garbage_collection.default_retention_period;
+    let retention_period =
+        cache::Column::RetentionPeriod.if_null(default_retention_period.as_secs() as i32);
+
+    // Find caches with retention periods set
+    let caches = Cache::find()
+        .select_only()
+        .column(cache::Column::Id)
+        .column(cache::Column::Name)
+        .column_as(retention_period.clone(), "retention_period")
+        .filter(retention_period.not_equals(0))
+        .into_model::<CacheIdAndRetentionPeriod>()
+        .all(db)
+        .await?;
+
+    tracing::info!(
+        "Found {} caches subject to time-based garbage collection",
+        caches.len()
+    );
+
+    let mut objects_deleted = 0;
+
+    for cache in caches {
+        let period = ChronoDuration::seconds(cache.retention_period.into());
+        let cutoff = now.checked_sub_signed(period).ok_or_else(|| {
+            anyhow!(
+                "Somehow subtracting retention period for cache {} underflowed",
+                cache.name
+            )
+        })?;
+
+        let deletion = Object::delete_many()
+            .filter(object::Column::CacheId.eq(cache.id))
+            .filter(object::Column::CreatedAt.lt(cutoff))
+            .filter(
+                object::Column::LastAccessedAt
+                    .is_null()
+                    .or(object::Column::LastAccessedAt.lt(cutoff)),
+            )
+            .exec(db)
+            .await?;
+
+        tracing::info!(
+            "Deleted {} objects from {} (ID {})",
+            deletion.rows_affected,
+            cache.name,
+            cache.id
+        );
+        objects_deleted += deletion.rows_affected;
+    }
+
+    tracing::info!("Deleted {} objects in total", objects_deleted);
+
+    Ok(())
+}
+
+#[instrument(skip_all)]
+async fn run_reap_orphan_nars(state: &State) -> Result<()> {
+    let db = state.database().await?;
+    let storage = state.storage().await?;
+
+    // find all orphan NARs...
+    let orphan_nar_ids = Query::select()
+        .from(Nar)
+        .expr(nar::Column::Id.into_expr())
+        .left_join(
+            Object,
+            object::Column::NarId
+                .into_expr()
+                .eq(nar::Column::Id.into_expr()),
+        )
+        .and_where(object::Column::Id.is_null())
+        .and_where(nar::Column::State.eq(NarState::Valid))
+        .and_where(nar::Column::HoldersCount.eq(0))
+        .lock_with_behavior(LockType::Update, LockBehavior::SkipLocked)
+        .to_owned();
+
+    // ... and transition their state to Deleted
+    //
+    // Deleted NARs are essentially invisible from our normal queries
+    let change_state = Query::update()
+        .table(Nar)
+        .value(nar::Column::State, NarState::Deleted)
+        .and_where(nar::Column::Id.in_subquery(orphan_nar_ids))
+        .returning_all()
+        .to_owned();
+
+    let stmt = db.get_database_backend().build(&change_state);
+
+    let orphan_nars = nar::Model::find_by_statement(stmt).all(db).await?;
+
+    if orphan_nars.is_empty() {
+        return Ok(());
+    }
+
+    // Delete the NARs from remote storage
+    let delete_limit = Arc::new(Semaphore::new(20)); // TODO: Make this configurable
+    let futures: Vec<_> = orphan_nars
+        .into_iter()
+        .map(|nar| {
+            let delete_limit = delete_limit.clone();
+            async move {
+                let permit = delete_limit.acquire().await?;
+                storage.delete_file_db(&nar.remote_file.0).await?;
+                drop(permit);
+                Result::<_, anyhow::Error>::Ok(nar.id)
+            }
+        })
+        .collect();
+
+    // Deletions can result in spurious failures; tolerate them.
+    //
+    // NARs that failed to be deleted from the remote storage will
+    // just be stuck in the Deleted state.
+    //
+    // TODO: Maybe have an interactive command to retry deletions?
+    let deleted_nar_ids: Vec<_> = join_all(futures)
+        .await
+        .into_iter()
+        .filter(|r| {
+            if let Err(e) = r {
+                tracing::warn!("Deletion failed: {}", e);
+            }
+
+            r.is_ok()
+        })
+        .map(|r| r.unwrap())
+        .collect();
+
+    // Finally, delete them from the database
+    let deletion = Nar::delete_many()
+        .filter(nar::Column::Id.is_in(deleted_nar_ids))
+        .exec(db)
+        .await?;
+
+    tracing::info!("Deleted {} NARs", deletion.rows_affected);
+
+    Ok(())
+}
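The time-based deletion query above encodes a retention predicate; a sketch (not part of the patch) of the same rule written as a plain function over one object row:

```rust
// Sketch: an object is expired when both its creation time and its last
// access (if any) are older than the retention cutoff.
use chrono::{DateTime, Duration, Utc};

fn is_expired(
    created_at: DateTime<Utc>,
    last_accessed_at: Option<DateTime<Utc>>,
    retention: Duration,
    now: DateTime<Utc>,
) -> bool {
    let cutoff = now - retention;
    // A never-accessed object falls back to its creation time alone,
    // matching the `LastAccessedAt IS NULL OR LastAccessedAt < cutoff` filter.
    created_at < cutoff && last_accessed_at.map_or(true, |t| t < cutoff)
}
```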
diff --git a/server/src/lib.rs b/server/src/lib.rs
new file mode 100644
index 0000000..a611c39
--- /dev/null
+++ b/server/src/lib.rs
@@ -0,0 +1,224 @@
+#![deny(
+    asm_sub_register,
+    deprecated,
+    missing_abi,
+    unsafe_code,
+    unused_macros,
+    unused_must_use,
+    unused_unsafe
+)]
+#![deny(clippy::from_over_into, clippy::needless_question_mark)]
+#![cfg_attr(
+    not(debug_assertions),
+    deny(unused_imports, unused_mut, unused_variables,)
+)]
+
+pub mod access;
+mod api;
+pub mod config;
+pub mod database;
+mod error;
+pub mod gc;
+mod middleware;
+mod narinfo;
+pub mod nix_manifest;
+pub mod oobe;
+mod storage;
+
+use std::net::SocketAddr;
+use std::sync::Arc;
+use std::time::Duration;
+
+use anyhow::Result;
+use axum::{
+    extract::Extension,
+    http::{uri::Scheme, Uri},
+    Router,
+};
+use sea_orm::{query::Statement, ConnectionTrait, Database, DatabaseConnection};
+use tokio::sync::OnceCell;
+use tokio::time;
+use tower_http::catch_panic::CatchPanicLayer;
+
+use access::http::{apply_auth, AuthState};
+use attic::cache::CacheName;
+use config::{Config, StorageConfig};
+use database::migration::{Migrator, MigratorTrait};
+use error::{ServerError, ServerResult};
+use middleware::{init_request_state, restrict_host};
+use storage::{LocalBackend, S3Backend, StorageBackend};
+
+type State = Arc<StateInner>;
+type RequestState = Arc<RequestStateInner>;
+
+/// Global server state.
+#[derive(Debug)]
+pub struct StateInner {
+    /// The Attic Server configuration.
+    config: Config,
+
+    /// Handle to the database.
+    database: OnceCell<DatabaseConnection>,
+
+    /// Handle to the storage backend.
+    storage: OnceCell<Arc<Box<dyn StorageBackend>>>,
+}
+
+/// Request state.
+#[derive(Debug)]
+struct RequestStateInner {
+    /// Auth state.
+    auth: AuthState,
+
+    /// The canonical API endpoint.
+    api_endpoint: Option<String>,
+
+    /// The potentially-invalid Host header supplied by the client.
+    host: String,
+
+    /// Whether the client claims the connection is HTTPS or not.
+    client_claims_https: bool,
+}
+
+impl StateInner {
+    async fn new(config: Config) -> State {
+        Arc::new(Self {
+            config,
+            database: OnceCell::new(),
+            storage: OnceCell::new(),
+        })
+    }
+
+    /// Returns a handle to the database.
+    async fn database(&self) -> ServerResult<&DatabaseConnection> {
+        self.database
+            .get_or_try_init(|| async {
+                Database::connect(&self.config.database.url)
+                    .await
+                    .map_err(ServerError::database_error)
+            })
+            .await
+    }
+
+    /// Returns a handle to the storage backend.
+    async fn storage(&self) -> ServerResult<&Arc<Box<dyn StorageBackend>>> {
+        self.storage
+            .get_or_try_init(|| async {
+                match &self.config.storage {
+                    StorageConfig::Local(local_config) => {
+                        let local = LocalBackend::new(local_config.clone()).await?;
+                        let boxed: Box<dyn StorageBackend> = Box::new(local);
+                        Ok(Arc::new(boxed))
+                    }
+                    StorageConfig::S3(s3_config) => {
+                        let s3 = S3Backend::new(s3_config.clone())?;
+                        let boxed: Box<dyn StorageBackend> = Box::new(s3);
+                        Ok(Arc::new(boxed))
+                    }
+                }
+            })
+            .await
+    }
+
+    /// Sends periodic heartbeat queries to the database.
+    async fn run_db_heartbeat(&self) -> ServerResult<()> {
+        let db = self.database().await?;
+        let stmt =
+            Statement::from_string(db.get_database_backend(), "SELECT 'heartbeat';".to_string());
+
+        loop {
+            let _ = db.execute(stmt.clone()).await;
+            time::sleep(Duration::from_secs(60)).await;
+        }
+    }
+}
+
+impl RequestStateInner {
+    /// Returns the base API endpoint for clients.
+    ///
+    /// The APIs encompass both the Attic API and the Nix binary
+    /// cache API.
+    fn api_endpoint(&self) -> ServerResult<String> {
+        if let Some(endpoint) = &self.api_endpoint {
+            Ok(endpoint.to_owned())
+        } else {
+            // Naively synthesize from the client's Host header.
+            // This is for convenience only and shouldn't be used in production!
+            let uri = Uri::builder()
+                .scheme(if self.client_claims_https {
+                    Scheme::HTTPS
+                } else {
+                    Scheme::HTTP
+                })
+                .authority(self.host.to_owned())
+                .path_and_query("/")
+                .build()
+                .map_err(ServerError::request_error)?;
+
+            Ok(uri.to_string())
+        }
+    }
+
+    /// Returns the Nix binary cache endpoint for clients.
+    ///
+    /// The binary cache endpoint may live on a different host than
+    /// the canonical API endpoint.
+    fn substituter_endpoint(&self, cache: CacheName) -> ServerResult<String> {
+        Ok(format!("{}{}", self.api_endpoint()?, cache.as_str()))
+    }
+}
+
+/// The fallback route.
+#[axum_macros::debug_handler]
+async fn fallback(_: Uri) -> ServerResult<()> {
+    Err(ServerError::NotFound)
+}
+
+/// Runs the API server.
+pub async fn run_api_server(cli_listen: Option<SocketAddr>, config: Config) -> Result<()> {
+    eprintln!("Starting API server...");
+
+    let state = StateInner::new(config).await;
+
+    let listen = if let Some(cli_listen) = cli_listen {
+        cli_listen
+    } else {
+        state.config.listen.to_owned()
+    };
+
+    let rest = Router::new()
+        .merge(api::get_router())
+        .fallback(fallback)
+        // middlewares
+        .layer(axum::middleware::from_fn(apply_auth))
+        .layer(axum::middleware::from_fn(init_request_state))
+        .layer(axum::middleware::from_fn(restrict_host))
+        .layer(Extension(state.clone()))
+        .layer(CatchPanicLayer::new());
+
+    eprintln!("Listening on {:?}...", listen);
+
+    let (server_ret, _) = tokio::join!(
+        axum::Server::bind(&listen).serve(rest.into_make_service()),
+        async {
+            if state.config.database.heartbeat {
+                let _ = state.run_db_heartbeat().await;
+            }
+        },
+    );
+
+    server_ret?;
+
+    Ok(())
+}
+
+/// Runs database migrations.
+pub async fn run_migrations(config: Config) -> Result<()> {
+    eprintln!("Running migrations...");
+
+    let state = StateInner::new(config).await;
+    let db = state.database().await?;
+    Migrator::up(db, None).await?;
+
+    Ok(())
+}
diff --git a/server/src/main.rs b/server/src/main.rs
new file mode 100644
index 0000000..c9d2274
--- /dev/null
+++ b/server/src/main.rs
@@ -0,0 +1,117 @@
+use std::env;
+use std::net::SocketAddr;
+use std::path::PathBuf;
+
+use anyhow::Result;
+use clap::{Parser, ValueEnum};
+use tokio::join;
+
+use attic_server::config;
+
+/// Nix binary cache server.
+#[derive(Debug, Parser)]
+#[clap(version, author = "Zhaofeng Li <hello@zhaofeng.li>")]
+#[clap(propagate_version = true)]
+struct Opts {
+    /// Path to the config file.
+    #[clap(short = 'f', long)]
+    config: Option<PathBuf>,
+
+    /// Socket address to listen on.
+    ///
+    /// This overrides `listen` in the config.
+    #[clap(short = 'l', long)]
+    listen: Option<SocketAddr>,
+
+    /// Mode to run.
+    #[clap(long, default_value = "monolithic")]
+    mode: ServerMode,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, ValueEnum)]
+enum ServerMode {
+    /// Run all components.
+    Monolithic,
+
+    /// Run the API server.
+    ApiServer,
+
+    /// Run the garbage collector periodically.
+    GarbageCollector,
+
+    /// Run the database migrations, then exit.
+    DbMigrations,
+
+    /// Run garbage collection once, then exit.
+    GarbageCollectorOnce,
+}
+
+#[tokio::main]
+async fn main() -> Result<()> {
+    init_logging();
+    dump_version();
+
+    let opts = Opts::parse();
+    let config = if let Some(config_path) = opts.config {
+        config::load_config_from_path(&config_path)
+    } else if let Ok(config_env) = env::var("ATTIC_SERVER_CONFIG_BASE64") {
+        let decoded = String::from_utf8(base64::decode(config_env.as_bytes())?)?;
+        config::load_config_from_str(&decoded)
+    } else {
+        // Config from XDG
+        let config_path = config::get_xdg_config_path()?;
+
+        if opts.mode == ServerMode::Monolithic {
+            // Special OOBE sequence
+            attic_server::oobe::run_oobe().await?;
+        } else if !config_path.exists() {
+            eprintln!("You haven't specified a config file (--config/-f), and the XDG config file doesn't exist.");
+            eprintln!("Hint: To automatically set up Attic, run `atticd` without any arguments.");
+        }
+
+        config::load_config_from_path(&config_path)
+    };
+
+    match opts.mode {
+        ServerMode::Monolithic => {
+            attic_server::run_migrations(config.clone()).await?;
+
+            let (api_server, _) = join!(
+                attic_server::run_api_server(opts.listen, config.clone()),
+                attic_server::gc::run_garbage_collection(config.clone()),
+            );
+
+            api_server?;
+        }
+        ServerMode::ApiServer => {
+            attic_server::run_api_server(opts.listen, config).await?;
+        }
+        ServerMode::GarbageCollector => {
+            attic_server::gc::run_garbage_collection(config.clone()).await;
+        }
+        ServerMode::DbMigrations => {
+            attic_server::run_migrations(config).await?;
+        }
+        ServerMode::GarbageCollectorOnce => {
+            attic_server::gc::run_garbage_collection_once(config).await?;
+        }
+    }
+
+    Ok(())
+}
+
+fn init_logging() {
+    #[cfg(not(feature = "tokio-console"))]
+    tracing_subscriber::fmt::init();
+
+    #[cfg(feature = "tokio-console")]
+    console_subscriber::init();
+}
+
+fn dump_version() {
+    #[cfg(debug_assertions)]
+    eprintln!("Attic Server {} (debug)", env!("CARGO_PKG_VERSION"));
+
+    #[cfg(not(debug_assertions))]
+    eprintln!("Attic Server {} (release)", env!("CARGO_PKG_VERSION"));
+}
diff --git a/server/src/middleware.rs b/server/src/middleware.rs
new file mode 100644
index 0000000..adba6ce
--- /dev/null
+++ b/server/src/middleware.rs
@@ -0,0 +1,57 @@
+use std::sync::Arc;
+
+use anyhow::anyhow;
+use axum::{
+    extract::{Extension, Host},
+    http::Request,
+    middleware::Next,
+    response::Response,
+};
+
+use super::{AuthState, RequestStateInner, State};
+use crate::error::{ServerError, ServerResult};
+
+/// Initializes per-request state.
+pub async fn init_request_state<B>(
+    Extension(state): Extension<State>,
+    Host(host): Host,
+    mut req: Request<B>,
+    next: Next<B>,
+) -> Response {
+    // X-Forwarded-Proto is an untrusted header
+    let client_claims_https =
+        if let Some(x_forwarded_proto) = req.headers().get("x-forwarded-proto") {
+            x_forwarded_proto.as_bytes() == b"https"
+        } else {
+            false
+        };
+
+    let req_state = Arc::new(RequestStateInner {
+        auth: AuthState::new(),
+        api_endpoint: state.config.api_endpoint.to_owned(),
+        host,
+        client_claims_https,
+    });
+
+    req.extensions_mut().insert(req_state);
+    next.run(req).await
+}
+
+/// Restricts valid Host headers.
+///
+/// We also require that all requests have a Host header in
+/// the first place.
+pub async fn restrict_host<B>(
+    Extension(state): Extension<State>,
+    Host(host): Host,
+    req: Request<B>,
+    next: Next<B>,
+) -> ServerResult<Response> {
+    let allowed_hosts = &state.config.allowed_hosts;
+
+    if !allowed_hosts.is_empty() && !allowed_hosts.iter().any(|h| h.as_str() == host) {
+        return Err(ServerError::RequestError(anyhow!("Bad Host")));
+    }
+
+    Ok(next.run(req).await)
+}
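A sketch (not part of the patch) of the endpoint that `api_endpoint()` in `server/src/lib.rs` synthesizes when no canonical `api-endpoint` is configured, using the `host` and `client_claims_https` values captured by the middleware above:

```rust
// Sketch: the fallback endpoint, built from untrusted request data.
fn synthesized_endpoint(host: &str, client_claims_https: bool) -> String {
    let scheme = if client_claims_https { "https" } else { "http" };
    // e.g. host = "attic.example.org" -> "https://attic.example.org/"
    format!("{}://{}/", scheme, host)
}
```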
diff --git a/server/src/narinfo/mod.rs b/server/src/narinfo/mod.rs
new file mode 100644
index 0000000..04dd44d
--- /dev/null
+++ b/server/src/narinfo/mod.rs
@@ -0,0 +1,283 @@
+//! NAR info.
+//!
+//! ## `.narinfo` format
+//!
+//! An example of [a valid
+//! .narinfo](https://cache.nixos.org/p4pclmv1gyja5kzc26npqpia1qqxrf0l.narinfo)
+//! signed by https://cache.nixos.org:
+//!
+//! ```text
+//! StorePath: /nix/store/p4pclmv1gyja5kzc26npqpia1qqxrf0l-ruby-2.7.3
+//! URL: nar/1w1fff338fvdw53sqgamddn1b2xgds473pv6y13gizdbqjv4i5p3.nar.xz
+//! Compression: xz
+//! FileHash: sha256:1w1fff338fvdw53sqgamddn1b2xgds473pv6y13gizdbqjv4i5p3
+//! FileSize: 4029176
+//! NarHash: sha256:1impfw8zdgisxkghq9a3q7cn7jb9zyzgxdydiamp8z2nlyyl0h5h
+//! NarSize: 18735072
+//! References: 0d71ygfwbmy1xjlbj1v027dfmy9cqavy-libffi-3.3 0dbbrvlw2rahvzi69bmpqy1z9mvzg62s-gdbm-1.19 0i6vphc3vnr8mg0gxjr61564hnp0s2md-gnugrep-3.6 0vkw1m51q34dr64z5i87dy99an4hfmyg-coreutils-8.32 64ylsrpd025kcyi608w3dqckzyz57mdc-libyaml-0.2.5 65ys3k6gn2s27apky0a0la7wryg3az9q-zlib-1.2.11 9m4hy7cy70w6v2rqjmhvd7ympqkj6yxk-ncurses-6.2 a4yw1svqqk4d8lhwinn9xp847zz9gfma-bash-4.4-p23 hbm0951q7xrl4qd0ccradp6bhjayfi4b-openssl-1.1.1k hjwjf3bj86gswmxva9k40nqx6jrb5qvl-readline-6.3p08 p4pclmv1gyja5kzc26npqpia1qqxrf0l-ruby-2.7.3 sbbifs2ykc05inws26203h0xwcadnf0l-glibc-2.32-46
+//! Deriver: bidkcs01mww363s4s7akdhbl6ws66b0z-ruby-2.7.3.drv
+//! Sig: cache.nixos.org-1:GrGV/Ls10TzoOaCnrcAqmPbKXFLLSBDeGNh5EQGKyuGA4K1wv1LcRVb6/sU+NAPK8lDiam8XcdJzUngmdhfTBQ==
+//! ```
+//!
+//! Consult the following files for the Nix implementation:
+//!
+//! - `src/libstore/nar-info.cc`
+//! - `src/libstore/path-info.hh`
+//!
+//! They provide valuable information on what the required
+//! fields are.
+//!
+//! ## Fingerprint
+//!
+//! The fingerprint format is described in `perl/lib/Nix/Manifest.pm` (`sub
+//! fingerprintAuth`). Each fingerprint contains the full store path, the
+//! NAR hash, the NAR size, as well as a list of references (full store
+//! paths). The format is as follows:
+//!
+//! ```text
+//! 1;{storePath};{narHash};{narSize};{commaDelimitedReferences}
+//! ```
+
+use std::os::unix::ffi::OsStrExt;
+use std::path::{Path, PathBuf};
+use std::str::FromStr;
+use std::string::ToString;
+
+use axum::http::StatusCode;
+use axum::response::{IntoResponse, Response};
+use serde::de;
+use serde::{Deserialize, Serialize};
+use serde_with::serde_as;
+
+use crate::error::{ServerError, ServerResult};
+use crate::nix_manifest::{self, SpaceDelimitedList};
+use attic::hash::Hash;
+use attic::mime;
+use attic::signing::NixKeypair;
+
+#[cfg(test)]
+mod tests;
+
+/// NAR information.
+#[serde_as]
+#[derive(Serialize, Deserialize)]
+pub struct NarInfo {
+    /// The full store path being cached, including the store directory.
+    ///
+    /// Part of the fingerprint.
+    ///
+    /// Example: `/nix/store/p4pclmv1gyja5kzc26npqpia1qqxrf0l-ruby-2.7.3`.
+    #[serde(rename = "StorePath")]
+    pub store_path: PathBuf,
+
+    /// The URL to fetch the object.
+    ///
+    /// This can either be relative to the base cache URL (`cacheUri`),
+    /// or be a full, absolute URL.
+    ///
+    /// Example: `nar/1w1fff338fvdw53sqgamddn1b2xgds473pv6y13gizdbqjv4i5p3.nar.xz`
+    /// Example: `https://cache.nixos.org/nar/1w1fff338fvdw53sqgamddn1b2xgds473pv6y13gizdbqjv4i5p3.nar.xz`
+    ///
+    /// Nix implementation:
+    #[serde(rename = "URL")]
+    pub url: String,
+
+    /// Compression in use.
+    #[serde(rename = "Compression")]
+    pub compression: Compression,
+
+    /// The hash of the compressed file.
+    #[serde(rename = "FileHash")]
+    pub file_hash: Hash,
+
+    /// The size of the compressed file.
+    #[serde(rename = "FileSize")]
+    pub file_size: usize,
+
+    /// The hash of the NAR archive.
+    ///
+    /// Part of the fingerprint.
+    #[serde(rename = "NarHash")]
+    pub nar_hash: Hash,
+
+    /// The size of the NAR archive.
+    ///
+    /// Part of the fingerprint.
+    #[serde(rename = "NarSize")]
+    pub nar_size: usize,
+
+    /// Other store paths this object directly references.
+    ///
+    /// This only includes the base paths, not the store directory itself.
+    ///
+    /// Part of the fingerprint.
+    ///
+    /// Example element: `j5p0j1w27aqdzncpw73k95byvhh5prw2-glibc-2.33-47`
+    #[serde(rename = "References")]
+    #[serde_as(as = "SpaceDelimitedList")]
+    pub references: Vec<String>,
+
+    /// The system this derivation is built for.
+    #[serde(rename = "System")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub system: Option<String>,
+
+    /// The derivation that produced this object.
+    #[serde(rename = "Deriver")]
+    #[serde(default)]
+    #[serde(deserialize_with = "deserialize_deriver")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub deriver: Option<String>,
+
+    /// The signature of the object.
+    ///
+    /// The `Sig` field can be duplicated to include multiple
+    /// signatures, but we only support one for now.
+    #[serde(rename = "Sig")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub signature: Option<String>,
+
+    /// The content address of the object.
+    #[serde(rename = "CA")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub ca: Option<String>,
+}
+
+/// NAR compression type.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
+pub enum Compression {
+    #[serde(rename = "none")]
+    None,
+    #[serde(rename = "xz")]
+    Xz,
+    #[serde(rename = "bzip2")]
+    Bzip2,
+    #[serde(rename = "br")]
+    Brotli,
+    #[serde(rename = "zstd")]
+    Zstd,
+}
+
+impl NarInfo {
+    /// Parses a narinfo from a string.
+    pub fn from_str(manifest: &str) -> ServerResult<Self> {
+        nix_manifest::from_str(manifest)
+    }
+
+    /// Returns the serialized representation of the narinfo.
+    pub fn to_string(&self) -> ServerResult<String> {
+        nix_manifest::to_string(self)
+    }
+
+    /// Returns the signature of this object, if it exists.
+    pub fn signature(&self) -> Option<&String> {
+        self.signature.as_ref()
+    }
+
+    /// Returns the store directory of this object.
+    pub fn store_dir(&self) -> &Path {
+        // FIXME: Validate store_path
+        self.store_path.parent().unwrap()
+    }
+
+    /// Signs the narinfo and adds the signature to it.
+    pub fn sign(&mut self, keypair: &NixKeypair) {
+        let signature = self.sign_readonly(keypair);
+        self.signature = Some(signature);
+    }
+
+    /// Returns the fingerprint of the object.
+    pub fn fingerprint(&self) -> Vec<u8> {
+        let store_dir = self.store_dir();
+        let mut fingerprint = b"1;".to_vec();
+
+        // 1;{storePath};{narHash};{narSize};{commaDelimitedReferences}
+
+        // storePath
+        fingerprint.extend(self.store_path.as_os_str().as_bytes());
+        fingerprint.extend(b";");
+
+        // narHash
+        fingerprint.extend(self.nar_hash.to_typed_base32().as_bytes());
+        fingerprint.extend(b";");
+
+        // narSize
+        let mut buf = itoa::Buffer::new();
+        let nar_size = buf.format(self.nar_size);
+        fingerprint.extend(nar_size.as_bytes());
+        fingerprint.extend(b";");
+
+        // commaDelimitedReferences
+        let mut iter = self.references.iter().peekable();
+        while let Some(reference) = iter.next() {
+            fingerprint.extend(store_dir.as_os_str().as_bytes());
+            fingerprint.extend(b"/");
+            fingerprint.extend(reference.as_bytes());
+
+            if iter.peek().is_some() {
+                fingerprint.extend(b",");
+            }
+        }
+
+        fingerprint
+    }
+
+    /// Signs the narinfo with a keypair, returning the signature.
+    fn sign_readonly(&self, keypair: &NixKeypair) -> String {
+        let fingerprint = self.fingerprint();
+        keypair.sign(&fingerprint)
+    }
+}
+
+impl IntoResponse for NarInfo {
+    fn into_response(self) -> Response {
+        match nix_manifest::to_string(&self) {
+            Ok(body) => Response::builder()
+                .status(StatusCode::OK)
+                .header("Content-Type", mime::NARINFO)
+                .body(body)
+                .unwrap()
+                .into_response(),
+            Err(e) => e.into_response(),
+        }
+    }
+}
+
+impl FromStr for Compression {
+    type Err = ServerError;
+
+    fn from_str(s: &str) -> ServerResult<Self> {
+        match s {
+            "none" => Ok(Self::None),
+            "xz" => Ok(Self::Xz),
+            "bzip2" => Ok(Self::Bzip2),
+            "br" => Ok(Self::Brotli),
+            "zstd" => Ok(Self::Zstd),
+            _ => Err(ServerError::InvalidCompressionType {
+                name: s.to_string(),
+            }),
+        }
+    }
+}
+
+impl ToString for Compression {
+    fn to_string(&self) -> String {
+        String::from(match self {
+            Self::None => "none",
+            Self::Xz => "xz",
+            Self::Bzip2 => "bzip2",
+            Self::Brotli => "br",
+            Self::Zstd => "zstd",
+        })
+    }
+}
+
+pub fn deserialize_deriver<'de, D>(deserializer: D) -> Result<Option<String>, D::Error>
+where
+    D: de::Deserializer<'de>,
+{
+    let s = String::deserialize(deserializer)?;
+    match s.as_str() {
+        "unknown-deriver" => Ok(None),
+        _ => Ok(Some(s)),
+    }
+}
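A sketch (not part of the patch) of the exact byte layout `fingerprint()` produces, using the values from the module-level example with the reference list shortened to a single entry for brevity:

```rust
// Sketch: the fingerprint as one formatted string.
fn fingerprint_example() -> String {
    let store_path = "/nix/store/p4pclmv1gyja5kzc26npqpia1qqxrf0l-ruby-2.7.3";
    let nar_hash = "sha256:1impfw8zdgisxkghq9a3q7cn7jb9zyzgxdydiamp8z2nlyyl0h5h";
    let nar_size = 18735072u64;
    // References are comma-delimited *full* store paths.
    let references = "/nix/store/p4pclmv1gyja5kzc26npqpia1qqxrf0l-ruby-2.7.3";

    format!("1;{store_path};{nar_hash};{nar_size};{references}")
}
```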
diff --git a/server/src/narinfo/tests.rs b/server/src/narinfo/tests.rs
new file mode 100644
index 0000000..de43366
--- /dev/null
+++ b/server/src/narinfo/tests.rs
@@ -0,0 +1,128 @@
+use super::*;
+
+use std::path::Path;
+
+use attic::signing::NixPublicKey;
+
+#[test]
+fn test_basic() {
+    let s = r#"
+StorePath: /nix/store/xcp9cav49dmsjbwdjlmkjxj10gkpx553-hello-2.10
+URL: nar/0nqgf15qfiacfxrgm2wkw0gwwncjqqzzalj8rs14w9srkydkjsk9.nar.xz
+Compression: xz
+FileHash: sha256:0nqgf15qfiacfxrgm2wkw0gwwncjqqzzalj8rs14w9srkydkjsk9
+FileSize: 41104
+NarHash: sha256:16mvl7v0ylzcg2n3xzjn41qhzbmgcn5iyarx16nn5l2r36n2kqci
+NarSize: 206104
+References: 563528481rvhc5kxwipjmg6rqrl95mdx-glibc-2.33-56 xcp9cav49dmsjbwdjlmkjxj10gkpx553-hello-2.10
+Deriver: vvb4wxmnjixmrkhmj2xb75z62hrr41i7-hello-2.10.drv
+Sig: cache.nixos.org-1:lo9EfNIL4eGRuNh7DTbAAffWPpI2SlYC/8uP7JnhgmfRIUNGhSbFe8qEaKN0mFS02TuhPpXFPNtRkFcCp0hGAQ==
+    "#;
+
+    let narinfo = NarInfo::from_str(s).expect("Could not parse narinfo");
+
+    fn verify_narinfo(narinfo: &NarInfo) {
+        assert_eq!(
+            Path::new("/nix/store/xcp9cav49dmsjbwdjlmkjxj10gkpx553-hello-2.10"),
+            narinfo.store_path
+        );
+        assert_eq!(Path::new("/nix/store"), narinfo.store_dir());
+        assert_eq!(
+            "nar/0nqgf15qfiacfxrgm2wkw0gwwncjqqzzalj8rs14w9srkydkjsk9.nar.xz",
+            narinfo.url
+        );
+        assert_eq!(Compression::Xz, narinfo.compression);
+        assert_eq!(
+            "sha256:0nqgf15qfiacfxrgm2wkw0gwwncjqqzzalj8rs14w9srkydkjsk9",
+            narinfo.file_hash.to_typed_base32()
+        );
+        assert_eq!(41104, narinfo.file_size);
+        assert_eq!(
+            "sha256:16mvl7v0ylzcg2n3xzjn41qhzbmgcn5iyarx16nn5l2r36n2kqci",
+            narinfo.nar_hash.to_typed_base32()
+        );
+        assert_eq!(206104, narinfo.nar_size);
+        assert_eq!(
+            vec![
+                "563528481rvhc5kxwipjmg6rqrl95mdx-glibc-2.33-56",
+                "xcp9cav49dmsjbwdjlmkjxj10gkpx553-hello-2.10",
+            ],
+            narinfo.references
+        );
+        assert_eq!(
+            Some("vvb4wxmnjixmrkhmj2xb75z62hrr41i7-hello-2.10.drv".to_string()),
+            narinfo.deriver
+        );
+        assert_eq!(Some("cache.nixos.org-1:lo9EfNIL4eGRuNh7DTbAAffWPpI2SlYC/8uP7JnhgmfRIUNGhSbFe8qEaKN0mFS02TuhPpXFPNtRkFcCp0hGAQ==".to_string()), narinfo.signature);
+    }
+
+    verify_narinfo(&narinfo);
+
+    let round_trip = narinfo.to_string().expect("Could not serialize narinfo");
+
+    eprintln!("{}", round_trip);
+
+    let reparse = NarInfo::from_str(&round_trip).expect("Could not re-parse serialized narinfo");
+
+    verify_narinfo(&reparse);
+}
+
+#[test]
+fn test_deriver() {
+    let s = r#"
+StorePath: /nix/store/xcp9cav49dmsjbwdjlmkjxj10gkpx553-hello-2.10
+URL: nar/0nqgf15qfiacfxrgm2wkw0gwwncjqqzzalj8rs14w9srkydkjsk9.nar.xz
+Compression: xz
+FileHash: sha256:0nqgf15qfiacfxrgm2wkw0gwwncjqqzzalj8rs14w9srkydkjsk9
+FileSize: 41104
+NarHash: sha256:16mvl7v0ylzcg2n3xzjn41qhzbmgcn5iyarx16nn5l2r36n2kqci
+NarSize: 206104
+References: 563528481rvhc5kxwipjmg6rqrl95mdx-glibc-2.33-56 xcp9cav49dmsjbwdjlmkjxj10gkpx553-hello-2.10
+Deriver: unknown-deriver
+    "#;
+
+    let narinfo = NarInfo::from_str(s).expect("Could not parse narinfo");
+
+    assert_eq!(None, narinfo.deriver);
+}
+
+#[test]
+fn test_fingerprint() {
+    let s = r#"
+StorePath: /nix/store/xcp9cav49dmsjbwdjlmkjxj10gkpx553-hello-2.10
+URL: nar/0nqgf15qfiacfxrgm2wkw0gwwncjqqzzalj8rs14w9srkydkjsk9.nar.xz
+Compression: xz
+FileHash: sha256:0nqgf15qfiacfxrgm2wkw0gwwncjqqzzalj8rs14w9srkydkjsk9
+FileSize: 41104
+NarHash: sha256:91e129ac1959d062ad093d2b1f8b65afae0f712056fe3eac78ec530ff6a1bb9a
+NarSize: 206104
+References: 563528481rvhc5kxwipjmg6rqrl95mdx-glibc-2.33-56 xcp9cav49dmsjbwdjlmkjxj10gkpx553-hello-2.10
+Deriver: vvb4wxmnjixmrkhmj2xb75z62hrr41i7-hello-2.10.drv
+Sig: cache.nixos.org-1:lo9EfNIL4eGRuNh7DTbAAffWPpI2SlYC/8uP7JnhgmfRIUNGhSbFe8qEaKN0mFS02TuhPpXFPNtRkFcCp0hGAQ==
+    "#;
+
+    let correct_fingerprint = b"1;/nix/store/xcp9cav49dmsjbwdjlmkjxj10gkpx553-hello-2.10;sha256:16mvl7v0ylzcg2n3xzjn41qhzbmgcn5iyarx16nn5l2r36n2kqci;206104;/nix/store/563528481rvhc5kxwipjmg6rqrl95mdx-glibc-2.33-56,/nix/store/xcp9cav49dmsjbwdjlmkjxj10gkpx553-hello-2.10";
+
+    let public_key =
+        NixPublicKey::from_str("cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=")
+            .expect("Could not import cache.nixos.org public key");
+
+    let narinfo = NarInfo::from_str(s).expect("Could not parse narinfo");
+
+    let fingerprint = narinfo.fingerprint();
+
+    eprintln!(
+        "Expected: {}",
+        String::from_utf8(correct_fingerprint.to_vec()).unwrap()
+    );
+    eprintln!(
+        "  Actual: {}",
+        String::from_utf8(fingerprint.clone()).unwrap()
+    );
+
+    assert_eq!(correct_fingerprint, fingerprint.as_slice());
+
+    public_key
+        .verify(&narinfo.fingerprint(), narinfo.signature().unwrap())
+        .expect("Could not verify signature");
+}
diff --git a/server/src/nix_manifest/deserializer.rs b/server/src/nix_manifest/deserializer.rs
new file mode 100644
index 0000000..947b943
--- /dev/null
+++ b/server/src/nix_manifest/deserializer.rs
@@ -0,0 +1,409 @@
+//! The deserializer.
+//!
+//! This maps the manifest format into the serde data model.
+
+use std::ops::{AddAssign, MulAssign};
+
+use serde::de::{DeserializeSeed, IntoDeserializer, MapAccess, Visitor};
+use serde::{de, forward_to_deserialize_any};
+
+use super::{Error, Result};
+
+/// The main deserializer.
+pub struct Deserializer<'de> {
+    input: &'de str,
+}
+
+/// Deserializer for values.
+pub struct ValueDeserializer<'a, 'de: 'a>(&'a mut Deserializer<'de>);
+
+impl<'de> Deserializer<'de> {
+    pub fn from_str(input: &'de str) -> Self {
+        Deserializer { input }
+    }
+}
+
+// from https://serde.rs/impl-deserializer.html
+impl<'de> Deserializer<'de> {
+    fn peek_char(&mut self) -> Result<char> {
+        self.input.chars().next().ok_or(Error::UnexpectedEof)
+    }
+
+    fn next_char(&mut self) -> Result<char> {
+        let ch = self.peek_char()?;
+        self.input = &self.input[ch.len_utf8()..];
+        Ok(ch)
+    }
+
+    fn consume_whitespace(&mut self) -> Result<()> {
+        match self.input.find(|c| !matches!(c, ' ' | '\n' | '\r' | '\t')) {
+            Some(idx) => {
+                self.input = &self.input[idx..];
+            }
+            None => {
+                self.input = &self.input[..0];
+            }
+        }
+        Ok(())
+    }
+
+    fn peek_until_eol(&mut self) -> Result<&'de str> {
+        match self.input.find(|c| c == '\r' || c == '\n') {
+            Some(idx) => Ok(&self.input[..idx]),
+            None => Ok(self.input),
+        }
+    }
+
+    fn parse_until_eol(&mut self) -> Result<&'de str> {
+        let s = self.peek_until_eol()?;
+        self.input = &self.input[s.len()..];
+        Ok(s)
+    }
+
+    fn parse_unsigned<T>(&mut self) -> Result<T>
+    where
+        T: AddAssign<T> + MulAssign<T> + From<u8>,
+    {
+        let mut int = match self.next_char()? {
+            ch @ '0'..='9' => T::from(ch as u8 - b'0'),
+            _ => {
+                return Err(Error::ExpectedInteger);
+            }
+        };
+        loop {
+            match self.input.chars().next() {
+                Some(ch @ '0'..='9') => {
+                    self.input = &self.input[1..];
+                    int *= T::from(10);
+                    int += T::from(ch as u8 - b'0');
+                }
+                _ => {
+                    return Ok(int);
+                }
+            }
+        }
+    }
+
+    fn parse_bool(&mut self) -> Result<bool> {
+        if self.input.starts_with('1') {
+            self.input = &self.input["1".len()..];
+            Ok(true)
+        } else if self.input.starts_with('0') {
+            self.input = &self.input["0".len()..];
+            Ok(false)
+        } else {
+            Err(Error::ExpectedBoolean)
+        }
+    }
+}
+
+impl<'de, 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> {
+    type Error = Error;
+
+    fn deserialize_any<V>(self, visitor: V) -> Result<V::Value>
+    where
+        V: Visitor<'de>,
+    {
+        // top level must be a map
+        self.deserialize_map(visitor)
+    }
+
+impl<'de, 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> {
+    type Error = Error;
+
+    fn deserialize_any<V>(self, visitor: V) -> Result<V::Value>
+    where
+        V: Visitor<'de>,
+    {
+        // top level must be a map
+        self.deserialize_map(visitor)
+    }
+
+    forward_to_deserialize_any! {
+        bool i8 i16 i32 i64 u8 u16 u32 u64 f32 f64 char str string
+        bytes byte_buf option unit unit_struct newtype_struct seq tuple tuple_struct
+        enum ignored_any
+    }
+
+    fn deserialize_map<V>(self, visitor: V) -> Result<V::Value>
+    where
+        V: Visitor<'de>,
+    {
+        visitor.visit_map(self)
+    }
+
+    fn deserialize_struct<V>(
+        self,
+        _name: &'static str,
+        _fields: &'static [&'static str],
+        visitor: V,
+    ) -> Result<V::Value>
+    where
+        V: Visitor<'de>,
+    {
+        self.deserialize_map(visitor)
+    }
+
+    fn deserialize_identifier<V>(self, visitor: V) -> Result<V::Value>
+    where
+        V: Visitor<'de>,
+    {
+        self.consume_whitespace()?;
+
+        let line = self.peek_until_eol()?;
+        let colon = line.find(':').ok_or_else(|| {
+            eprintln!("Current input: {:?}", self.input);
+            Error::ExpectedColon
+        })?;
+
+        let identifier = &self.input[..colon];
+
+        self.input = &self.input[colon..];
+        visitor.visit_borrowed_str(identifier)
+    }
+}
+
+impl<'de> MapAccess<'de> for Deserializer<'de> {
+    type Error = Error;
+
+    fn next_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>>
+    where
+        K: DeserializeSeed<'de>,
+    {
+        self.consume_whitespace()?;
+
+        if self.input.is_empty() {
+            return Ok(None);
+        }
+
+        seed.deserialize(&mut *self).map(Some)
+    }
+
+    fn next_value_seed<V>(&mut self, seed: V) -> Result<V::Value>
+    where
+        V: DeserializeSeed<'de>,
+    {
+        if self.next_char()? != ':' {
+            return Err(Error::ExpectedColon);
+        }
+
+        self.consume_whitespace()?;
+
+        seed.deserialize(&mut ValueDeserializer(self))
+    }
+}
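+
+// Values are parsed from the remainder of the line after the colon.
+// Space-delimited lists (e.g. `References` in a `.narinfo`) are not
+// handled here; fields opt into them with the `SpaceDelimitedList`
+// adapter in the parent module.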
+impl<'de, 'a> de::Deserializer<'de> for &'a mut ValueDeserializer<'a, 'de> {
+    type Error = Error;
+
+    fn deserialize_any<V>(self, visitor: V) -> Result<V::Value>
+    where
+        V: Visitor<'de>,
+    {
+        self.deserialize_str(visitor)
+    }
+
+    fn deserialize_bool<V>(self, visitor: V) -> Result<V::Value>
+    where
+        V: Visitor<'de>,
+    {
+        visitor.visit_bool(self.0.parse_bool()?)
+    }
+
+    fn deserialize_i8<V>(self, _visitor: V) -> Result<V::Value>
+    where
+        V: Visitor<'de>,
+    {
+        Err(Error::Unsupported("Signed integer"))
+    }
+
+    fn deserialize_i16<V>(self, _visitor: V) -> Result<V::Value>
+    where
+        V: Visitor<'de>,
+    {
+        Err(Error::Unsupported("Signed integer"))
+    }
+
+    fn deserialize_i32<V>(self, _visitor: V) -> Result<V::Value>
+    where
+        V: Visitor<'de>,
+    {
+        Err(Error::Unsupported("Signed integer"))
+    }
+
+    fn deserialize_i64<V>(self, _visitor: V) -> Result<V::Value>
+    where
+        V: Visitor<'de>,
+    {
+        Err(Error::Unsupported("Signed integer"))
+    }
+
+    fn deserialize_u8<V>(self, visitor: V) -> Result<V::Value>
+    where
+        V: Visitor<'de>,
+    {
+        visitor.visit_u8(self.0.parse_unsigned()?)
+    }
+
+    fn deserialize_u16<V>(self, visitor: V) -> Result<V::Value>
+    where
+        V: Visitor<'de>,
+    {
+        visitor.visit_u16(self.0.parse_unsigned()?)
+    }
+
+    fn deserialize_u32<V>(self, visitor: V) -> Result<V::Value>
+    where
+        V: Visitor<'de>,
+    {
+        visitor.visit_u32(self.0.parse_unsigned()?)
+    }
+
+    fn deserialize_u64<V>(self, visitor: V) -> Result<V::Value>
+    where
+        V: Visitor<'de>,
+    {
+        visitor.visit_u64(self.0.parse_unsigned()?)
+    }
+
+    fn deserialize_f32<V>(self, _visitor: V) -> Result<V::Value>
+    where
+        V: Visitor<'de>,
+    {
+        Err(Error::FloatUnsupported)
+    }
+
+    fn deserialize_f64<V>(self, _visitor: V) -> Result<V::Value>
+    where
+        V: Visitor<'de>,
+    {
+        Err(Error::FloatUnsupported)
+    }
+
+    fn deserialize_char<V>(self, _visitor: V) -> Result<V::Value>
+    where
+        V: Visitor<'de>,
+    {
+        Err(Error::Unsupported("Char"))
+    }
+
+    // only accepted in maps
+    fn deserialize_str<V>(self, visitor: V) -> Result<V::Value>
+    where
+        V: Visitor<'de>,
+    {
+        visitor.visit_borrowed_str(self.0.parse_until_eol()?.trim_start())
+    }
+
+    // only accepted in maps
+    fn deserialize_string<V>(self, visitor: V) -> Result<V::Value>
+    where
+        V: Visitor<'de>,
+    {
+        self.deserialize_str(visitor)
+    }
+
+    fn deserialize_bytes<V>(self, _visitor: V) -> Result<V::Value>
+    where
+        V: Visitor<'de>,
+    {
+        Err(Error::Unsupported("Byte sequence"))
+    }
+
+    fn deserialize_byte_buf<V>(self, _visitor: V) -> Result<V::Value>
+    where
+        V: Visitor<'de>,
+    {
+        Err(Error::Unsupported("Byte buffer"))
+    }
+
+    fn deserialize_option<V>(self, visitor: V) -> Result<V::Value>
+    where
+        V: Visitor<'de>,
+    {
+        // in this format, if a key exists, then it must be Some
+        visitor.visit_some(self)
+    }
+
+    fn deserialize_unit<V>(self, _visitor: V) -> Result<V::Value>
+    where
+        V: Visitor<'de>,
+    {
+        Err(Error::Unsupported("Unit"))
+    }
+
+    fn deserialize_unit_struct<V>(self, _name: &'static str, visitor: V) -> Result<V::Value>
+    where
+        V: Visitor<'de>,
+    {
+        self.deserialize_unit(visitor)
+    }
+
+    fn deserialize_newtype_struct<V>(self, _name: &'static str, visitor: V) -> Result<V::Value>
+    where
+        V: Visitor<'de>,
+    {
+        visitor.visit_newtype_struct(self)
+    }
+
+    fn deserialize_seq<V>(self, _visitor: V) -> Result<V::Value>
+    where
+        V: Visitor<'de>,
+    {
+        Err(Error::Unsupported("Sequence"))
+    }
+
+    fn deserialize_tuple<V>(self, _len: usize, visitor: V) -> Result<V::Value>
+    where
+        V: Visitor<'de>,
+    {
+        self.deserialize_seq(visitor)
+    }
+
+    fn deserialize_tuple_struct<V>(
+        self,
+        _name: &'static str,
+        _len: usize,
+        visitor: V,
+    ) -> Result<V::Value>
+    where
+        V: Visitor<'de>,
+    {
+        self.deserialize_seq(visitor)
+    }
+
+    fn deserialize_map<V>(self, _visitor: V) -> Result<V::Value>
+    where
+        V: Visitor<'de>,
+    {
+        Err(Error::NestedMapUnsupported)
+    }
+
+    fn deserialize_struct<V>(
+        self,
+        _name: &'static str,
+        _fields: &'static [&'static str],
+        visitor: V,
+    ) -> Result<V::Value>
+    where
+        V: Visitor<'de>,
+    {
+        self.deserialize_map(visitor)
+    }
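+
+    // Enum values are matched by their textual name: the rest of the line
+    // is fed through `into_deserializer`, so e.g. `Compression: xz` in a
+    // `.narinfo` deserializes into a unit variant of a `Compression` enum.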
+    fn deserialize_enum<V>(
+        self,
+        _name: &'static str,
+        _variants: &'static [&'static str],
+        visitor: V,
+    ) -> Result<V::Value>
+    where
+        V: Visitor<'de>,
+    {
+        let val = self.0.parse_until_eol()?;
+        visitor.visit_enum(val.into_deserializer())
+    }
+
+    fn deserialize_identifier<V>(self, _visitor: V) -> Result<V::Value>
+    where
+        V: Visitor<'de>,
+    {
+        Err(Error::Unexpected("Identifier"))
+    }
+
+    fn deserialize_ignored_any<V>(self, visitor: V) -> Result<V::Value>
+    where
+        V: Visitor<'de>,
+    {
+        self.deserialize_any(visitor)
+    }
+}
diff --git a/server/src/nix_manifest/mod.rs b/server/src/nix_manifest/mod.rs
new file mode 100644
index 0000000..9182fc0
--- /dev/null
+++ b/server/src/nix_manifest/mod.rs
@@ -0,0 +1,136 @@
+//! The Nix manifest format.
+//!
+//! Nix uses a simple format in binary cache manifests (`.narinfo`,
+//! `/nix-cache-info`). It consists of a single, flat KV map with
+//! colon (`:`) as the delimiter.
+//!
+//! It's not well-defined and the official implementation performs
+//! serialization and deserialization by hand [1]. Here we implement
+//! a deserializer and a serializer using the serde framework.
+//!
+//! An example of a `/nix-cache-info` file:
+//!
+//! ```text
+//! StoreDir: /nix/store
+//! WantMassQuery: 1
+//! Priority: 40
+//! ```
+//!
+//! [1]
+
+mod deserializer;
+mod serializer;
+
+#[cfg(test)]
+mod tests;
+
+use std::fmt::Display;
+use std::result::Result as StdResult;
+
+use displaydoc::Display;
+use serde::{de, ser, Deserialize, Serialize};
+use serde_with::{formats::SpaceSeparator, StringWithSeparator};
+
+use crate::error::{ServerError, ServerResult};
+use deserializer::Deserializer;
+use serializer::Serializer;
+
+type Result<T> = StdResult<T, Error>;
+
+pub fn from_str<T>(s: &str) -> ServerResult<T>
+where
+    T: for<'de> Deserialize<'de>,
+{
+    let mut deserializer = Deserializer::from_str(s);
+    T::deserialize(&mut deserializer).map_err(ServerError::ManifestSerializationError)
+
+    // FIXME: Reject extra output??
+}
+
+pub fn to_string<T>(value: &T) -> ServerResult<String>
+where
+    T: Serialize,
+{
+    let mut serializer = Serializer::new();
+    value
+        .serialize(&mut serializer)
+        .map_err(ServerError::ManifestSerializationError)?;
+
+    Ok(serializer.into_output())
+}
+
+/// An error during (de)serialization.
+#[derive(Debug, Display)]
+pub enum Error {
+    /// Unexpected {0}.
+    Unexpected(&'static str),
+
+    /// Unexpected EOF.
+    UnexpectedEof,
+
+    /// Expected a colon.
+    ExpectedColon,
+
+    /// Expected a boolean.
+    ExpectedBoolean,
+
+    /// Expected an integer.
+    ExpectedInteger,
+
+    /// "{0}" values are unsupported.
+    Unsupported(&'static str),
+
+    /// Not possible to auto-determine the type.
+    AnyUnsupported,
+
+    /// None is unsupported. Add #[serde(skip_serializing_if = "Option::is_none")]
+    NoneUnsupported,
+
+    /// Nested maps are unsupported.
+    NestedMapUnsupported,
+
+    /// Floating point numbers are unsupported.
+    FloatUnsupported,
+
+    /// Custom error: {0}
+    Custom(String),
+}
+
+/// Custom (de)serializer for a space-delimited list.
+///
+/// Example usage:
+///
+/// ```
+/// use serde::Deserialize;
+/// use serde_with::serde_as;
+/// # use attic_server::nix_manifest::{self, SpaceDelimitedList};
+///
+/// #[serde_as]
+/// #[derive(Debug, Deserialize)]
+/// struct MyManifest {
+///     #[serde_as(as = "SpaceDelimitedList")]
+///     some_list: Vec<String>,
+/// }
+///
+/// let s = "some_list: item-a item-b";
+/// let parsed: MyManifest = nix_manifest::from_str(s).unwrap();
+///
+/// assert_eq!(vec![ "item-a", "item-b" ], parsed.some_list);
+/// ```
+pub type SpaceDelimitedList = StringWithSeparator<SpaceSeparator, String>;
+
+impl std::error::Error for Error {}
+
+impl de::Error for Error {
+    fn custom<T: Display>(msg: T) -> Self {
+        let f = format!("{}", msg);
+        Self::Custom(f)
+    }
+}
+
+impl ser::Error for Error {
+    fn custom<T: Display>(msg: T) -> Self {
+        let f = format!("{}", msg);
+        Self::Custom(f)
+    }
+}
diff --git a/server/src/nix_manifest/serializer.rs b/server/src/nix_manifest/serializer.rs
new file mode 100644
index 0000000..6619b35
--- /dev/null
+++ b/server/src/nix_manifest/serializer.rs
@@ -0,0 +1,336 @@
+//! The serializer.
+//!
+//! The bulk of the serde data model is unsupported due to the restricted
+//! format.
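+//!
+//! A struct serializes to one `Key: Value` line per field, with booleans
+//! written as `1`/`0`:
+//!
+//! ```text
+//! StoreDir: /nix/store
+//! WantMassQuery: 1
+//! ```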
+
+use serde::{ser, Serialize};
+
+use super::{Error, Result};
+
+pub struct Serializer {
+    output: String,
+    seen_map: bool,
+}
+
+impl Serializer {
+    pub(super) fn new() -> Self {
+        Self {
+            output: String::new(),
+            seen_map: false,
+        }
+    }
+
+    pub(super) fn into_output(self) -> String {
+        self.output
+    }
+}
+
+impl<'a> ser::Serializer for &'a mut Serializer {
+    type Ok = ();
+
+    type Error = Error;
+
+    type SerializeSeq = Self;
+    type SerializeTuple = Self;
+    type SerializeTupleStruct = Self;
+    type SerializeTupleVariant = Self;
+    type SerializeMap = Self;
+    type SerializeStruct = Self;
+    type SerializeStructVariant = Self;
+
+    fn serialize_bool(self, v: bool) -> Result<()> {
+        self.output += if v { "1" } else { "0" };
+        Ok(())
+    }
+
+    // Integers
+    fn serialize_i8(self, v: i8) -> Result<()> {
+        self.serialize_i64(i64::from(v))
+    }
+
+    fn serialize_i16(self, v: i16) -> Result<()> {
+        self.serialize_i64(i64::from(v))
+    }
+
+    fn serialize_i32(self, v: i32) -> Result<()> {
+        self.serialize_i64(i64::from(v))
+    }
+
+    fn serialize_i64(self, v: i64) -> Result<()> {
+        let mut buf = itoa::Buffer::new();
+        self.output += buf.format(v);
+        Ok(())
+    }
+
+    fn serialize_u8(self, v: u8) -> Result<()> {
+        self.serialize_u64(u64::from(v))
+    }
+
+    fn serialize_u16(self, v: u16) -> Result<()> {
+        self.serialize_u64(u64::from(v))
+    }
+
+    fn serialize_u32(self, v: u32) -> Result<()> {
+        self.serialize_u64(u64::from(v))
+    }
+
+    fn serialize_u64(self, v: u64) -> Result<()> {
+        let mut buf = itoa::Buffer::new();
+        self.output += buf.format(v);
+        Ok(())
+    }
+
+    // Floats
+
+    fn serialize_f32(self, v: f32) -> Result<()> {
+        self.serialize_f64(f64::from(v))
+    }
+
+    fn serialize_f64(self, v: f64) -> Result<()> {
+        let mut buf = ryu::Buffer::new();
+        self.output += buf.format(v);
+        Ok(())
+    }
+
+    // Strings
+    fn serialize_char(self, v: char) -> Result<()> {
+        self.serialize_str(&v.to_string())
+    }
+
+    fn serialize_str(self, v: &str) -> Result<()> {
+        self.output += v;
+        Ok(())
+    }
+
+    // Bytes
+    fn serialize_bytes(self, _v: &[u8]) -> Result<()> {
+        Err(Error::Unsupported("Byte sequence"))
+    }
+
+    // Optionals
+    fn serialize_none(self) -> Result<()> {
+        Err(Error::NoneUnsupported)
+    }
+
+    fn serialize_some<T>(self, value: &T) -> Result<()>
+    where
+        T: ?Sized + Serialize,
+    {
+        value.serialize(self)
+    }
+
+    // Enum
+    fn serialize_unit(self) -> Result<()> {
+        Err(Error::Unsupported("Unit"))
+    }
+
+    fn serialize_unit_struct(self, _name: &'static str) -> Result<()> {
+        self.serialize_unit()
+    }
+
+    fn serialize_unit_variant(
+        self,
+        _name: &'static str,
+        _variant_index: u32,
+        variant: &'static str,
+    ) -> Result<()> {
+        self.serialize_str(variant)
+    }
+
+    fn serialize_newtype_struct<T>(self, _name: &'static str, value: &T) -> Result<()>
+    where
+        T: ?Sized + Serialize,
+    {
+        value.serialize(self)
+    }
+
+    fn serialize_newtype_variant<T>(
+        self,
+        _name: &'static str,
+        _variant_index: u32,
+        _variant: &'static str,
+        _value: &T,
+    ) -> Result<()>
+    where
+        T: ?Sized + Serialize,
+    {
+        Err(Error::Unsupported("Newtype variant"))
+    }
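+
+    // Only a single top-level map or struct can be represented in this
+    // line-oriented format, so the sequence/tuple/variant serializers
+    // below all bail out; lists are pre-joined into strings via
+    // `SpaceDelimitedList` before they reach this serializer.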
+    // Compound types
+    fn serialize_seq(self, _len: Option<usize>) -> Result<Self::SerializeSeq> {
+        Err(Error::Unsupported("Sequence"))
+    }
+
+    fn serialize_tuple(self, len: usize) -> Result<Self::SerializeTuple> {
+        self.serialize_seq(Some(len))
+    }
+
+    fn serialize_tuple_struct(
+        self,
+        _name: &'static str,
+        len: usize,
+    ) -> Result<Self::SerializeTupleStruct> {
+        self.serialize_seq(Some(len))
+    }
+
+    fn serialize_tuple_variant(
+        self,
+        _name: &'static str,
+        _variant_index: u32,
+        _variant: &'static str,
+        _len: usize,
+    ) -> Result<Self::SerializeTupleVariant> {
+        Err(Error::Unsupported("Tuple variant"))
+    }
+
+    fn serialize_map(self, _len: Option<usize>) -> Result<Self::SerializeMap> {
+        if self.seen_map {
+            return Err(Error::NestedMapUnsupported);
+        }
+
+        self.seen_map = true;
+        Ok(self)
+    }
+
+    fn serialize_struct(self, _name: &'static str, len: usize) -> Result<Self::SerializeStruct> {
+        self.serialize_map(Some(len))
+    }
+
+    fn serialize_struct_variant(
+        self,
+        _name: &'static str,
+        _variant_index: u32,
+        _variant: &'static str,
+        _len: usize,
+    ) -> Result<Self::SerializeStructVariant> {
+        Err(Error::Unsupported("Struct variant"))
+    }
+}
+
+impl<'a> ser::SerializeSeq for &'a mut Serializer {
+    type Ok = ();
+    type Error = Error;
+
+    // Serialize a single element of the sequence.
+    fn serialize_element<T>(&mut self, _value: &T) -> Result<()>
+    where
+        T: ?Sized + Serialize,
+    {
+        Err(Error::Unsupported("Sequence"))
+    }
+
+    // Close the sequence.
+    fn end(self) -> Result<()> {
+        Err(Error::Unsupported("Sequence"))
+    }
+}
+
+impl<'a> ser::SerializeTuple for &'a mut Serializer {
+    type Ok = ();
+    type Error = Error;
+
+    fn serialize_element<T>(&mut self, _value: &T) -> Result<()>
+    where
+        T: ?Sized + Serialize,
+    {
+        Err(Error::Unsupported("Tuple"))
+    }
+
+    fn end(self) -> Result<()> {
+        Err(Error::Unsupported("Tuple"))
+    }
+}
+
+impl<'a> ser::SerializeTupleStruct for &'a mut Serializer {
+    type Ok = ();
+    type Error = Error;
+
+    fn serialize_field<T>(&mut self, _value: &T) -> Result<()>
+    where
+        T: ?Sized + Serialize,
+    {
+        Err(Error::Unsupported("Tuple struct"))
+    }
+
+    fn end(self) -> Result<()> {
+        Err(Error::Unsupported("Tuple struct"))
+    }
+}
+
+impl<'a> ser::SerializeTupleVariant for &'a mut Serializer {
+    type Ok = ();
+    type Error = Error;
+
+    fn serialize_field<T>(&mut self, _value: &T) -> Result<()>
+    where
+        T: ?Sized + Serialize,
+    {
+        Err(Error::Unsupported("Tuple variant"))
+    }
+
+    fn end(self) -> Result<()> {
+        Err(Error::Unsupported("Tuple variant"))
+    }
+}
+
+impl<'a> ser::SerializeMap for &'a mut Serializer {
+    type Ok = ();
+    type Error = Error;
+
+    fn serialize_key<T>(&mut self, key: &T) -> Result<()>
+    where
+        T: ?Sized + Serialize,
+    {
+        key.serialize(&mut **self)
+    }
+
+    fn serialize_value<T>(&mut self, value: &T) -> Result<()>
+    where
+        T: ?Sized + Serialize,
+    {
+        self.output += ": ";
+        value.serialize(&mut **self)
+    }
+
+    fn end(self) -> Result<()> {
+        self.output += "\n";
+        Ok(())
+    }
+}
+
+impl<'a> ser::SerializeStruct for &'a mut Serializer {
+    type Ok = ();
+    type Error = Error;
+
+    fn serialize_field<T>(&mut self, key: &'static str, value: &T) -> Result<()>
+    where
+        T: ?Sized + Serialize,
+    {
+        key.serialize(&mut **self)?;
+        self.output += ": ";
+        value.serialize(&mut **self)?;
+        self.output += "\n";
+        Ok(())
+    }
+
+    fn end(self) -> Result<()> {
+        Ok(())
+    }
+}
+
+impl<'a> ser::SerializeStructVariant for &'a mut Serializer {
+    type Ok = ();
+    type Error = Error;
+
+    fn serialize_field<T>(&mut self, _key: &'static str, _value: &T) -> Result<()>
+    where
+        T: ?Sized + Serialize,
+    {
+        Err(Error::Unsupported("Struct variant"))
+    }
+
+    fn end(self) -> Result<()> {
+        Err(Error::Unsupported("Struct variant"))
+    }
+}
diff --git a/server/src/nix_manifest/tests.rs b/server/src/nix_manifest/tests.rs
new file mode 100644
index 0000000..2be1bd6
--- /dev/null
+++ b/server/src/nix_manifest/tests.rs
@@ -0,0 +1,55 @@
+use std::path::PathBuf;
+
+use serde::{Deserialize, Serialize};
+
+/// A hypothetical manifest.
+#[derive(Debug, PartialEq, Deserialize, Serialize)]
+struct HypotheticalManifest {
+    #[serde(rename = "StoreDir")]
+    store_dir: PathBuf,
+
+    #[serde(rename = "WantMassQuery")]
+    want_mass_query: bool,
+}
+
+#[test]
+fn test_basic() {
+    let manifest = r#"
+StoreDir: /nix/store
+WantMassQuery: 1
+    "#;
+
+    let expected = HypotheticalManifest {
+        store_dir: PathBuf::from("/nix/store"),
+        want_mass_query: true,
+    };
+
+    let parsed = super::from_str::<HypotheticalManifest>(manifest).unwrap();
+    assert_eq!(parsed, expected);
+
+    // TODO: Use the actual Nix parser to reparse the resulting manifest?
+    let round_trip = super::to_string(&parsed).unwrap();
+
+    // FIXME: This is pretty fragile. Just testing that it can be parsed again should
+    // be enough.
+    assert_eq!(manifest.trim(), round_trip.trim());
+
+    let parsed2 = super::from_str::<HypotheticalManifest>(&round_trip).unwrap();
+    assert_eq!(parsed2, expected);
+}
+
+#[test]
+fn test_unquoted_number() {
+    let manifest = r#"
+StoreDir: 12345
+WantMassQuery: 1
+    "#;
+
+    let expected = HypotheticalManifest {
+        store_dir: PathBuf::from("12345"),
+        want_mass_query: true,
+    };
+
+    let parsed = super::from_str::<HypotheticalManifest>(manifest).unwrap();
+    assert_eq!(parsed, expected);
+}
diff --git a/server/src/oobe.rs b/server/src/oobe.rs
new file mode 100644
index 0000000..878a20e
--- /dev/null
+++ b/server/src/oobe.rs
@@ -0,0 +1,103 @@
+//! Guided out-of-box experience.
+//!
+//! This performs automatic setup for people running `atticd`
+//! directly without specifying any configuration. The goal is
+//! to let them quickly have a taste of Attic, with a config
+//! template that provides guidance for them to achieve a more
+//! permanent setup.
+//!
+//! Paths:
+//! - Config: `~/.config/attic/server.toml`
+//! - SQLite: `~/.local/share/attic/server.db`
+//! - NARs: `~/.local/share/attic/storage`
+
+use anyhow::Result;
+use chrono::{Months, Utc};
+use rand::distributions::Alphanumeric;
+use rand::Rng;
+use tokio::fs::{self, OpenOptions};
+
+use crate::access::{JwtEncodingKey, Token};
+use crate::config;
+use attic::cache::CacheNamePattern;
+
+const CONFIG_TEMPLATE: &str = include_str!("config-template.toml");
+
+pub async fn run_oobe() -> Result<()> {
+    let config_path = config::get_xdg_config_path()?;
+
+    if config_path.exists() {
+        return Ok(());
+    }
+
+    let data_path = config::get_xdg_data_path()?;
+
+    // Generate a simple config
+    let database_path = data_path.join("server.db");
+    let database_url = format!("sqlite://{}", database_path.to_str().unwrap());
+    OpenOptions::new()
+        .create(true)
+        .write(true)
+        .open(&database_path)
+        .await?;
+
+    let storage_path = data_path.join("storage");
+    fs::create_dir_all(&storage_path).await?;
+
+    let hs256_secret_base64 = {
+        let random: String = rand::thread_rng()
+            .sample_iter(&Alphanumeric)
+            .take(128)
+            .map(char::from)
+            .collect();
+
+        base64::encode(random)
+    };
+
+    let config_content = CONFIG_TEMPLATE
+        .replace("%database_url%", &database_url)
+        .replace("%storage_path%", storage_path.to_str().unwrap())
+        .replace("%token_hs256_secret_base64%", &hs256_secret_base64);
+
+    fs::write(&config_path, config_content.as_bytes()).await?;
+
+    // Generate a JWT token
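+    // (the root token grants every permission on every cache via the `*`
+    // pattern and expires in two years)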
+    let root_token = {
+        let in_two_years = Utc::now().checked_add_months(Months::new(24)).unwrap();
+        let mut token = Token::new("root".to_string(), &in_two_years);
+        let any_cache = CacheNamePattern::new("*".to_string()).unwrap();
+        let mut perm = token.get_or_insert_permission_mut(any_cache);
+        perm.pull = true;
+        perm.push = true;
+        perm.delete = true;
+        perm.create_cache = true;
+        perm.configure_cache = true;
+        perm.configure_cache_retention = true;
+        perm.destroy_cache = true;
+
+        let encoding_key = JwtEncodingKey::from_base64_secret(&hs256_secret_base64)?;
+        token.encode(&encoding_key)?
+    };
+
+    eprintln!();
+    eprintln!("-----------------");
+    eprintln!("Welcome to Attic!");
+    eprintln!();
+    eprintln!("A simple setup using SQLite and local storage has been configured for you in:");
+    eprintln!();
+    eprintln!("    {}", config_path.to_str().unwrap());
+    eprintln!();
+    eprintln!("Run the following command to log into this server:");
+    eprintln!();
+    eprintln!("    attic login local http://localhost:8080 {root_token}");
+    eprintln!();
+    eprintln!("Documentation and guides:");
+    eprintln!();
+    eprintln!("    https://docs.attic.rs");
+    eprintln!();
+    eprintln!("Enjoy!");
+    eprintln!("-----------------");
+    eprintln!();
+
+    Ok(())
+}
diff --git a/server/src/storage/local.rs b/server/src/storage/local.rs
new file mode 100644
index 0000000..b5d5ac4
--- /dev/null
+++ b/server/src/storage/local.rs
@@ -0,0 +1,116 @@
+//! Local file storage.
+
+use std::path::PathBuf;
+
+use async_trait::async_trait;
+use serde::{Deserialize, Serialize};
+use tokio::fs::{self, File};
+use tokio::io::{self, AsyncRead};
+
+use super::{Download, RemoteFile, StorageBackend};
+use crate::error::{ServerError, ServerResult};
+
+#[derive(Debug)]
+pub struct LocalBackend {
+    config: LocalStorageConfig,
+}
+
+#[derive(Debug, Clone, Deserialize)]
+pub struct LocalStorageConfig {
+    /// The directory to store all files under.
+    path: PathBuf,
+}
+
+/// Reference to a file in local storage.
+///
+/// We still call it "remote file" for consistency :)
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+pub struct LocalRemoteFile {
+    /// Name of the file.
+    pub name: String,
+}
+
+impl LocalBackend {
+    pub async fn new(config: LocalStorageConfig) -> ServerResult<Self> {
+        fs::create_dir_all(&config.path)
+            .await
+            .map_err(ServerError::remote_file_error)?;
+
+        Ok(Self { config })
+    }
+
+    fn get_path(&self, p: &str) -> PathBuf {
+        self.config.path.join(p)
+    }
+}
+
+#[async_trait]
+impl StorageBackend for LocalBackend {
+    async fn upload_file(
+        &self,
+        name: String,
+        mut stream: &mut (dyn AsyncRead + Unpin + Send),
+    ) -> ServerResult<RemoteFile> {
+        let mut file = File::create(self.get_path(&name))
+            .await
+            .map_err(ServerError::remote_file_error)?;
+
+        io::copy(&mut stream, &mut file)
+            .await
+            .map_err(ServerError::remote_file_error)?;
+
+        Ok(RemoteFile::Local(LocalRemoteFile { name }))
+    }
+
+    async fn delete_file(&self, name: String) -> ServerResult<()> {
+        fs::remove_file(self.get_path(&name))
+            .await
+            .map_err(ServerError::remote_file_error)?;
+
+        Ok(())
+    }
+
+    async fn delete_file_db(&self, file: &RemoteFile) -> ServerResult<()> {
+        let file = if let RemoteFile::Local(file) = file {
+            file
+        } else {
+            return Err(ServerError::RemoteFileError(anyhow::anyhow!(
+                "Does not understand the remote file reference"
+            )));
+        };
+
+        fs::remove_file(self.get_path(&file.name))
+            .await
+            .map_err(ServerError::remote_file_error)?;
+
+        Ok(())
+    }
+
+    async fn download_file(&self, name: String) -> ServerResult<Download> {
+        let file = File::open(self.get_path(&name))
+            .await
+            .map_err(ServerError::remote_file_error)?;
+
+        Ok(Download::Stream(Box::new(file)))
+    }
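+
+    // The `_db` variants resolve the file from a `RemoteFile` reference
+    // stored in the database rather than from the current naming scheme.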
+    async fn download_file_db(&self, file: &RemoteFile) -> ServerResult<Download> {
+        let file = if let RemoteFile::Local(file) = file {
+            file
+        } else {
+            return Err(ServerError::RemoteFileError(anyhow::anyhow!(
+                "Does not understand the remote file reference"
+            )));
+        };
+
+        let file = File::open(self.get_path(&file.name))
+            .await
+            .map_err(ServerError::remote_file_error)?;
+
+        Ok(Download::Stream(Box::new(file)))
+    }
+
+    async fn make_db_reference(&self, name: String) -> ServerResult<RemoteFile> {
+        Ok(RemoteFile::Local(LocalRemoteFile { name }))
+    }
+}
diff --git a/server/src/storage/mod.rs b/server/src/storage/mod.rs
new file mode 100644
index 0000000..fad2b1a
--- /dev/null
+++ b/server/src/storage/mod.rs
@@ -0,0 +1,86 @@
+//! Remote file storage.
+
+mod local;
+mod s3;
+
+use serde::{Deserialize, Serialize};
+use tokio::io::AsyncRead;
+
+use crate::error::ServerResult;
+
+pub(crate) use self::local::{LocalBackend, LocalRemoteFile, LocalStorageConfig};
+pub(crate) use self::s3::{S3Backend, S3RemoteFile, S3StorageConfig};
+
+/// Reference to a location where a NAR is stored.
+///
+/// To be compatible with the Nix Binary Cache API, the reference
+/// must be able to be converted to a (time-limited) direct link
+/// to the file that the client will be redirected to when they
+/// request the NAR.
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+pub enum RemoteFile {
+    /// File in an S3-compatible storage bucket.
+    S3(S3RemoteFile),
+
+    /// File in local storage.
+    Local(LocalRemoteFile),
+
+    /// A direct HTTP link.
+    ///
+    /// This is mostly here to facilitate testing.
+    Http(HttpRemoteFile),
+}
+
+/// Way to download a file.
+pub enum Download {
+    /// A redirect to a (possibly ephemeral) URL.
+    Redirect(String),
+
+    /// A stream.
+    Stream(Box<dyn AsyncRead + Unpin + Send>),
+}
+
+// TODO: Maybe make RemoteFile the one true reference instead of having two sets of APIs?
+/// A storage backend.
+#[async_trait::async_trait]
+pub trait StorageBackend: Send + Sync + std::fmt::Debug {
+    /// Uploads a file.
+    async fn upload_file(
+        &self,
+        name: String,
+        stream: &mut (dyn AsyncRead + Unpin + Send),
+    ) -> ServerResult<RemoteFile>;
+
+    /// Deletes a file.
+    async fn delete_file(&self, name: String) -> ServerResult<()>;
+
+    /// Deletes a file using a database reference.
+    async fn delete_file_db(&self, file: &RemoteFile) -> ServerResult<()>;
+
+    /// Downloads a file using the current configuration.
+    async fn download_file(&self, name: String) -> ServerResult<Download>;
+
+    /// Downloads a file using a database reference.
+    async fn download_file_db(&self, file: &RemoteFile) -> ServerResult<Download>;
+
+    /// Creates a database reference for a file.
+    async fn make_db_reference(&self, name: String) -> ServerResult<RemoteFile>;
+}
+
+/// Reference to an HTTP link from which the file can be downloaded.
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+pub struct HttpRemoteFile {
+    /// URL of the file.
+    pub url: String,
+}
+
+impl RemoteFile {
+    /// Returns the remote file ID.
+    pub fn remote_file_id(&self) -> String {
+        match self {
+            Self::S3(f) => format!("s3:{}/{}/{}", f.region, f.bucket, f.key),
+            Self::Http(f) => format!("http:{}", f.url),
+            Self::Local(f) => format!("local:{}", f.name),
+        }
+    }
+}
diff --git a/server/src/storage/s3.rs b/server/src/storage/s3.rs
new file mode 100644
index 0000000..ef23a75
--- /dev/null
+++ b/server/src/storage/s3.rs
@@ -0,0 +1,374 @@
+//! S3 remote files.
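+//!
+//! Small uploads (under 8 MiB) go through a single `PutObject`; anything
+//! larger is streamed through the S3 multipart upload API in 8 MiB parts.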
+
+use std::time::Duration;
+
+use async_trait::async_trait;
+use aws_sdk_s3::{
+    config::Builder as S3ConfigBuilder, model::CompletedMultipartUpload, model::CompletedPart,
+    presigning::config::PresigningConfig, Client, Config as S3Config, Credentials, Endpoint,
+    Region,
+};
+use futures::future::join_all;
+use serde::{Deserialize, Serialize};
+use tokio::io::{AsyncRead, AsyncReadExt};
+
+use super::{Download, RemoteFile, StorageBackend};
+use crate::error::{ServerError, ServerResult};
+use attic::util::Finally;
+
+/// The chunk size for each part in a multipart upload.
+const CHUNK_SIZE: usize = 8 * 1024 * 1024;
+
+/// The S3 remote file storage backend.
+#[derive(Debug)]
+pub struct S3Backend {
+    client: Client,
+    config: S3StorageConfig,
+}
+
+/// S3 remote file storage configuration.
+#[derive(Debug, Clone, Deserialize)]
+pub struct S3StorageConfig {
+    /// The AWS region.
+    region: String,
+
+    /// The name of the bucket.
+    bucket: String,
+
+    /// Custom S3 endpoint.
+    ///
+    /// Set this if you are using an S3-compatible object storage (e.g., Minio).
+    endpoint: Option<String>,
+
+    /// S3 credentials.
+    ///
+    /// If not specified, it's read from the `AWS_ACCESS_KEY_ID` and
+    /// `AWS_SECRET_ACCESS_KEY` environment variables.
+    credentials: Option<S3CredentialsConfig>,
+}
+
+/// S3 credential configuration.
+#[derive(Debug, Clone, Deserialize)]
+pub struct S3CredentialsConfig {
+    /// Access key ID.
+    access_key_id: String,
+
+    /// Secret access key.
+    secret_access_key: String,
+}
+
+/// Reference to a file in an S3-compatible storage bucket.
+///
+/// We store the region and bucket to facilitate migration.
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+pub struct S3RemoteFile {
+    /// Name of the S3 region.
+    pub region: String,
+
+    /// Name of the bucket.
+    pub bucket: String,
+
+    /// Key of the file.
+    pub key: String,
+}
+
+impl S3Backend {
+    pub fn new(config: S3StorageConfig) -> ServerResult<Self> {
+        let s3_config = Self::config_builder(&config)?
+            .region(Region::new(config.region.to_owned()))
+            .build();
+
+        Ok(Self {
+            client: Client::from_conf(s3_config),
+            config,
+        })
+    }
+
+    fn config_builder(config: &S3StorageConfig) -> ServerResult<S3ConfigBuilder> {
+        let mut builder = S3Config::builder();
+
+        if let Some(credentials) = &config.credentials {
+            builder = builder.credentials_provider(Credentials::new(
+                &credentials.access_key_id,
+                &credentials.secret_access_key,
+                None,
+                None,
+                "s3",
+            ));
+        }
+
+        if let Some(endpoint) = &config.endpoint {
+            let endpoint = Endpoint::immutable(endpoint).map_err(ServerError::remote_file_error)?;
+            builder = builder.endpoint_resolver(endpoint);
+        }
+
+        Ok(builder)
+    }
+
+    fn get_client_from_db_ref<'a>(
+        &self,
+        file: &'a RemoteFile,
+    ) -> ServerResult<(Client, &'a S3RemoteFile)> {
+        let file = if let RemoteFile::S3(file) = file {
+            file
+        } else {
+            return Err(ServerError::RemoteFileError(anyhow::anyhow!(
+                "Does not understand the remote file reference"
+            )));
+        };
+
+        // FIXME: Ugly
+        let client = if self.client.conf().region().unwrap().as_ref() == file.region {
+            self.client.clone()
+        } else {
+            // FIXME: Cache the client instance
+            let s3_conf = Self::config_builder(&self.config)?
+                .region(Region::new(file.region.to_owned()))
+                .build();
+            Client::from_conf(s3_conf)
+        };
+
+        Ok((client, file))
+    }
+}
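+
+// `upload_file` below buffers the first chunk to decide between a single
+// `PutObject` and a multipart upload before committing to either path.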
+#[async_trait]
+impl StorageBackend for S3Backend {
+    async fn upload_file(
+        &self,
+        name: String,
+        mut stream: &mut (dyn AsyncRead + Unpin + Send),
+    ) -> ServerResult<RemoteFile> {
+        let first_chunk = read_chunk_async(&mut stream).await?;
+
+        if first_chunk.len() < CHUNK_SIZE {
+            // do a normal PutObject
+            let put_object = self
+                .client
+                .put_object()
+                .bucket(&self.config.bucket)
+                .key(&name)
+                .body(first_chunk.into())
+                .send()
+                .await
+                .map_err(ServerError::remote_file_error)?;
+
+            tracing::debug!("put_object -> {:#?}", put_object);
+
+            return Ok(RemoteFile::S3(S3RemoteFile {
+                region: self.config.region.clone(),
+                bucket: self.config.bucket.clone(),
+                key: name,
+            }));
+        }
+
+        let multipart = self
+            .client
+            .create_multipart_upload()
+            .bucket(&self.config.bucket)
+            .key(&name)
+            .send()
+            .await
+            .map_err(ServerError::remote_file_error)?;
+
+        let upload_id = multipart.upload_id().unwrap();
+
+        let cleanup = Finally::new({
+            let bucket = self.config.bucket.clone();
+            let client = self.client.clone();
+            let upload_id = upload_id.to_owned();
+            let name = name.clone();
+
+            async move {
+                tracing::warn!("Upload was interrupted - Aborting multipart upload");
+
+                let r = client
+                    .abort_multipart_upload()
+                    .bucket(bucket)
+                    .key(name)
+                    .upload_id(upload_id)
+                    .send()
+                    .await;
+
+                if let Err(e) = r {
+                    tracing::warn!("Failed to abort multipart upload: {}", e);
+                }
+            }
+        });
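+
+        // Parts are uploaded as concurrently spawned tasks; part numbers
+        // start at 1, and the first chunk read above is reused as part 1.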
+        let mut part_number = 1;
+        let mut parts = Vec::new();
+        let mut first_chunk = Some(first_chunk);
+
+        loop {
+            let chunk = if part_number == 1 {
+                first_chunk.take().unwrap()
+            } else {
+                read_chunk_async(&mut stream).await?
+            };
+
+            if chunk.is_empty() {
+                break;
+            }
+
+            let client = self.client.clone();
+            let fut = tokio::task::spawn({
+                client
+                    .upload_part()
+                    .bucket(&self.config.bucket)
+                    .key(&name)
+                    .upload_id(upload_id)
+                    .part_number(part_number)
+                    .body(chunk.clone().into())
+                    .send()
+            });
+
+            parts.push(fut);
+            part_number += 1;
+        }
+
+        let completed_parts = join_all(parts)
+            .await
+            .into_iter()
+            .map(|join_result| join_result.unwrap())
+            .collect::<Result<Vec<_>, _>>()
+            .map_err(ServerError::remote_file_error)?
+            .into_iter()
+            .enumerate()
+            .map(|(idx, part)| {
+                let part_number = idx + 1;
+                CompletedPart::builder()
+                    .set_e_tag(part.e_tag().map(str::to_string))
+                    .set_part_number(Some(part_number as i32))
+                    .set_checksum_crc32(part.checksum_crc32().map(str::to_string))
+                    .set_checksum_crc32_c(part.checksum_crc32_c().map(str::to_string))
+                    .set_checksum_sha1(part.checksum_sha1().map(str::to_string))
+                    .set_checksum_sha256(part.checksum_sha256().map(str::to_string))
+                    .build()
+            })
+            .collect::<Vec<_>>();
+
+        let completed_multipart_upload = CompletedMultipartUpload::builder()
+            .set_parts(Some(completed_parts))
+            .build();
+
+        let completion = self
+            .client
+            .complete_multipart_upload()
+            .bucket(&self.config.bucket)
+            .key(&name)
+            .upload_id(upload_id)
+            .multipart_upload(completed_multipart_upload)
+            .send()
+            .await
+            .map_err(ServerError::remote_file_error)?;
+
+        tracing::debug!("complete_multipart_upload -> {:#?}", completion);
+
+        cleanup.cancel();
+
+        Ok(RemoteFile::S3(S3RemoteFile {
+            region: self.config.region.clone(),
+            bucket: self.config.bucket.clone(),
+            key: name,
+        }))
+    }
+
+    async fn delete_file(&self, name: String) -> ServerResult<()> {
+        let deletion = self
+            .client
+            .delete_object()
+            .bucket(&self.config.bucket)
+            .key(&name)
+            .send()
+            .await
+            .map_err(ServerError::remote_file_error)?;
+
+        tracing::debug!("delete_file -> {:#?}", deletion);
+
+        Ok(())
+    }
+
+    async fn delete_file_db(&self, file: &RemoteFile) -> ServerResult<()> {
+        let (client, file) = self.get_client_from_db_ref(file)?;
+
+        let deletion = client
+            .delete_object()
+            .bucket(&file.bucket)
+            .key(&file.key)
+            .send()
+            .await
+            .map_err(ServerError::remote_file_error)?;
+
+        tracing::debug!("delete_file -> {:#?}", deletion);
+
+        Ok(())
+    }
+
+    async fn download_file(&self, name: String) -> ServerResult<Download> {
+        // FIXME: Configurable expiration
+        let presign_config = PresigningConfig::expires_in(Duration::from_secs(10))
+            .map_err(ServerError::remote_file_error)?;
+
+        let presigned = self
+            .client
+            .get_object()
+            .bucket(&self.config.bucket)
+            .key(&name)
+            .presigned(presign_config)
+            .await
+            .map_err(ServerError::remote_file_error)?;
+
+        Ok(Download::Redirect(presigned.uri().to_string()))
+    }
+
+    async fn download_file_db(&self, file: &RemoteFile) -> ServerResult<Download> {
+        let (client, file) = self.get_client_from_db_ref(file)?;
+
+        let presign_config = PresigningConfig::expires_in(Duration::from_secs(600))
+            .map_err(ServerError::remote_file_error)?;
+
+        let presigned = client
+            .get_object()
+            .bucket(&file.bucket)
+            .key(&file.key)
+            .presigned(presign_config)
+            .await
+            .map_err(ServerError::remote_file_error)?;
+
+        Ok(Download::Redirect(presigned.uri().to_string()))
+    }
+
+    async fn make_db_reference(&self, name: String) -> ServerResult<RemoteFile> {
+        Ok(RemoteFile::S3(S3RemoteFile {
+            region: self.config.region.clone(),
+            bucket: self.config.bucket.clone(),
+            key: name,
+        }))
+    }
+}
+
+// adapted from rust-s3
+async fn read_chunk_async<S: AsyncRead + Unpin + Send>(stream: &mut S) -> ServerResult<Vec<u8>> {
+    let mut chunk: Box<[u8]> = vec![0u8; CHUNK_SIZE].into_boxed_slice();
+    let mut cursor = 0;
+
+    while cursor < CHUNK_SIZE {
+        let buf = &mut chunk[cursor..];
+        let read = stream
+            .read(buf)
+            .await
+            .map_err(ServerError::remote_file_error)?;
+
+        if read == 0 {
+            break;
+        } else {
+            cursor += read;
+        }
+    }
+
+    let mut vec = chunk.into_vec();
+    vec.truncate(cursor);
+
+    Ok(vec)
+}
diff --git a/shell.nix b/shell.nix
new file mode 100644
index 0000000..9044128
--- /dev/null
+++ b/shell.nix
@@ -0,0 +1,3 @@
+let
+  flake = import ./flake-compat.nix;
+in 
flake.shellNix